[SR] Fix broken unit test build (#76111)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/76111

https://github.com/pytorch/pytorch/pull/68640 broke our build by porting `cat` to structured kernels; it's not clear how CI didn't catch this.
ghstack-source-id: 154335722

Test Plan: CI

Reviewed By: navahgar, ajyu

Differential Revision: D35780296

fbshipit-source-id: 0a262eb06a8d619227e5db10b6a775bf0b2e17c1
(cherry picked from commit aea6fbf9365391011df5211164e3978075d7a5cb)
Author: Mike Iovine
Date: 2022-04-20 11:29:53 -07:00
Committed by: PyTorch MergeBot
Parent: 8646e0dc28
Commit: b6a4234090


@@ -60,7 +60,7 @@ struct DeepAndWideFast : torch::nn::Module {
       auto dp_unflatten = at::cpu::bmm(ad_emb_packed, user_emb_t);
       // auto dp = at::native::flatten(dp_unflatten, 1);
       auto dp = dp_unflatten.view({dp_unflatten.size(0), 1});
-      auto input = at::native::_cat_cpu({dp, wide_preproc}, 1);
+      auto input = at::cpu::cat({dp, wide_preproc}, 1);
       // fc1 = torch::nn::functional::linear(input, fc_w_, fc_b_);
       fc_w_t_ = torch::t(fc_w_);
@@ -114,7 +114,7 @@ struct DeepAndWideFast : torch::nn::Module {
       // Potential optimization: we can replace cat with carefully constructed
       // tensor views on the output that are passed to the _out ops above.
-      at::native::_cat_out_cpu(
+      at::cpu::cat_outf(
           {prealloc_tensors[5], prealloc_tensors[2]}, 1, prealloc_tensors[6]);
       at::cpu::addmm_out(
           prealloc_tensors[7], fc_b_, prealloc_tensors[6], fc_w_t_, 1, 1);
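
For context, the replacement calls follow ATen's functional/out-variant convention: `at::cpu::cat` allocates and returns the concatenated tensor, while `at::cpu::cat_outf` takes the destination tensor as its final argument and writes into it. The snippet below is a minimal sketch of that usage, not part of the change itself; it assumes an ATen build where the generated `ATen/CPUFunctions.h` header exposes these entry points.

#include <ATen/ATen.h>
#include <ATen/CPUFunctions.h>

int main() {
  // Two 2x3 CPU float tensors to concatenate along dim 1.
  auto a = at::ones({2, 3});
  auto b = at::zeros({2, 3});

  // Functional variant: allocates and returns a new 2x6 tensor.
  auto cat_result = at::cpu::cat({a, b}, /*dim=*/1);

  // Out variant (as in the second hunk above): the `_outf` form takes the
  // output tensor last and writes the result into it, avoiding a fresh
  // allocation when the buffer is preallocated.
  auto prealloc = at::empty({2, 6});
  at::cpu::cat_outf({a, b}, /*dim=*/1, prealloc);

  return 0;
}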