diff --git a/.github/ci_commit_pins/xla.txt b/.github/ci_commit_pins/xla.txt
index 20e7a6132861..75b1f91aa809 100644
--- a/.github/ci_commit_pins/xla.txt
+++ b/.github/ci_commit_pins/xla.txt
@@ -1 +1 @@
-8f913829abd9de749339d8d74b7357e1be3a7907
+fba464b199559f61faa720de8bf64cf955cfdce7
diff --git a/aten/src/ATen/VmapModeRegistrations.cpp b/aten/src/ATen/VmapModeRegistrations.cpp
index 171a8dc9376d..ab4556c8c415 100644
--- a/aten/src/ATen/VmapModeRegistrations.cpp
+++ b/aten/src/ATen/VmapModeRegistrations.cpp
@@ -42,34 +42,34 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
 #define TENSOROPTIONS c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>
 
   // random operations (out-of-place)
-  m.impl("bernoulli", unsupportedRandomOp<const Tensor&, const optional<Generator>&>);
-  m.impl("bernoulli.out", unsupportedRandomOp_<const Tensor&, const optional<Generator>&, Tensor&>);
-  m.impl("bernoulli.p", unsupportedRandomOp<const Tensor&, double, const optional<Generator>&>);
-  m.impl("bernoulli_.Tensor", unsupportedRandomOp_<Tensor&, const Tensor&, const optional<Generator>&>);
-  m.impl("bernoulli_.float", unsupportedRandomOp_<Tensor&, double, const optional<Generator>&>);
+  m.impl("bernoulli", unsupportedRandomOp<const Tensor&, optional<Generator>>);
+  m.impl("bernoulli.out", unsupportedRandomOp_<const Tensor&, optional<Generator>, Tensor&>);
+  m.impl("bernoulli.p", unsupportedRandomOp<const Tensor&, double, optional<Generator>>);
+  m.impl("bernoulli_.Tensor", unsupportedRandomOp_<Tensor&, const Tensor&, optional<Generator>>);
+  m.impl("bernoulli_.float", unsupportedRandomOp_<Tensor&, double, optional<Generator>>);
 
-  m.impl("cauchy_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
-  m.impl("exponential_", unsupportedRandomOp_<Tensor&, double, const optional<Generator>&>);
-  m.impl("geometric_", unsupportedRandomOp_<Tensor&, double, const optional<Generator>&>);
-  m.impl("log_normal_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
-  m.impl("multinomial", unsupportedRandomOp<const Tensor&, int64_t, bool, const optional<Generator>&>);
-  m.impl("multinomial.out", unsupportedRandomOp_<const Tensor&, int64_t, bool, const optional<Generator>&, Tensor&>);
+  m.impl("cauchy_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);
+  m.impl("exponential_", unsupportedRandomOp_<Tensor&, double, optional<Generator>>);
+  m.impl("geometric_", unsupportedRandomOp_<Tensor&, double, optional<Generator>>);
+  m.impl("log_normal_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);
+  m.impl("multinomial", unsupportedRandomOp<const Tensor&, int64_t, bool, optional<Generator>>);
+  m.impl("multinomial.out", unsupportedRandomOp_<const Tensor&, int64_t, bool, optional<Generator>, Tensor&>);
 
-  m.impl("normal.Tensor_float", unsupportedRandomOp<const Tensor&, double, const optional<Generator>&>);
-  m.impl("normal.Tensor_float_out", unsupportedRandomOp_<const Tensor&, double, const optional<Generator>&, Tensor&>);
-  m.impl("normal.float_Tensor_out", unsupportedRandomOp_<double, const Tensor&, const optional<Generator>&, Tensor&>);
-  m.impl("normal.float_Tensor", unsupportedRandomOp<double, const Tensor&, const optional<Generator>&>);
-  m.impl("normal.Tensor_Tensor", unsupportedRandomOp<const Tensor&, const Tensor&, const optional<Generator>&>);
-  m.impl("normal.Tensor_Tensor_out", unsupportedRandomOp_<const Tensor&, const Tensor&, const optional<Generator>&, Tensor&>);
-  m.impl("normal.float_float", unsupportedRandomOp<double, double, IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
-  m.impl("normal.float_float_out", unsupportedRandomOp_<double, double, IntArrayRef, const optional<Generator>&, Tensor&>);
-  m.impl("normal_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
+  m.impl("normal.Tensor_float", unsupportedRandomOp<const Tensor&, double, optional<Generator>>);
+  m.impl("normal.Tensor_float_out", unsupportedRandomOp_<const Tensor&, double, optional<Generator>, Tensor&>);
+  m.impl("normal.float_Tensor_out", unsupportedRandomOp_<double, const Tensor&, optional<Generator>, Tensor&>);
+  m.impl("normal.float_Tensor", unsupportedRandomOp<double, const Tensor&, optional<Generator>>);
+  m.impl("normal.Tensor_Tensor", unsupportedRandomOp<const Tensor&, const Tensor&, optional<Generator>>);
+  m.impl("normal.Tensor_Tensor_out", unsupportedRandomOp_<const Tensor&, const Tensor&, optional<Generator>, Tensor&>);
+  m.impl("normal.float_float", unsupportedRandomOp<double, double, IntArrayRef, optional<Generator>, TENSOROPTIONS>);
+  m.impl("normal.float_float_out", unsupportedRandomOp_<double, double, IntArrayRef, optional<Generator>, Tensor&>);
+  m.impl("normal_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);
 
-  m.impl("poisson", unsupportedRandomOp<const Tensor&, const optional<Generator>&>);
+  m.impl("poisson", unsupportedRandomOp<const Tensor&, optional<Generator>>);
 
-  m.impl("random_.from", unsupportedRandomOp_<Tensor&, int64_t, optional<int64_t>, const optional<Generator>&>);
-  m.impl("random_.to", unsupportedRandomOp_<Tensor&, int64_t, const optional<Generator>&>);
-  m.impl("random_", unsupportedRandomOp_<Tensor&, const optional<Generator>&>);
+  m.impl("random_.from", unsupportedRandomOp_<Tensor&, int64_t, optional<int64_t>, optional<Generator>>);
+  m.impl("random_.to", unsupportedRandomOp_<Tensor&, int64_t, optional<Generator>>);
+  m.impl("random_", unsupportedRandomOp_<Tensor&, optional<Generator>>);
 
   m.impl("rand_like", unsupportedRandomOp<const Tensor&, TENSOROPTIONS, optional<MemoryFormat>>);
   m.impl("randn_like", unsupportedRandomOp<const Tensor&, TENSOROPTIONS, optional<MemoryFormat>>);
@@ -78,34 +78,34 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
   m.impl("randint_like.low_dtype", unsupportedRandomOp<const Tensor&, int64_t, int64_t, TENSOROPTIONS, optional<MemoryFormat>>);
 
   m.impl("rand", unsupportedRandomOp<IntArrayRef, TENSOROPTIONS>);
-  m.impl("rand.generator", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+  m.impl("rand.generator", unsupportedRandomOp<IntArrayRef, optional<Generator>, TENSOROPTIONS>);
   m.impl("rand.names", unsupportedRandomOp<IntArrayRef, optional<DimnameList>, TENSOROPTIONS>);
-  m.impl("rand.generator_with_names", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, optional<DimnameList>, TENSOROPTIONS>);
+  m.impl("rand.generator_with_names", unsupportedRandomOp<IntArrayRef, optional<Generator>, optional<DimnameList>, TENSOROPTIONS>);
   m.impl("rand.out", unsupportedRandomOp_<IntArrayRef, Tensor&>);
-  m.impl("rand.generator_out", unsupportedRandomOp_<IntArrayRef, const optional<Generator>&, Tensor&>);
+  m.impl("rand.generator_out", unsupportedRandomOp_<IntArrayRef, optional<Generator>, Tensor&>);
 
   m.impl("randn", unsupportedRandomOp<IntArrayRef, TENSOROPTIONS>);
-  m.impl("randn.generator", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+  m.impl("randn.generator", unsupportedRandomOp<IntArrayRef, optional<Generator>, TENSOROPTIONS>);
   m.impl("randn.names", unsupportedRandomOp<IntArrayRef, optional<DimnameList>, TENSOROPTIONS>);
-  m.impl("randn.generator_with_names", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, optional<DimnameList>, TENSOROPTIONS>);
+  m.impl("randn.generator_with_names", unsupportedRandomOp<IntArrayRef, optional<Generator>, optional<DimnameList>, TENSOROPTIONS>);
   m.impl("randn.out", unsupportedRandomOp_<IntArrayRef, Tensor&>);
-  m.impl("randn.generator_out", unsupportedRandomOp_<IntArrayRef, const optional<Generator>&, Tensor&>);
+  m.impl("randn.generator_out", unsupportedRandomOp_<IntArrayRef, optional<Generator>, Tensor&>);
 
   m.impl("randperm", unsupportedRandomOp<int64_t, TENSOROPTIONS>);
-  m.impl("randperm.generator", unsupportedRandomOp<int64_t, const optional<Generator>&, TENSOROPTIONS>);
+  m.impl("randperm.generator", unsupportedRandomOp<int64_t, optional<Generator>, TENSOROPTIONS>);
   m.impl("randperm.out", unsupportedRandomOp_<int64_t, Tensor&>);
-  m.impl("randperm.generator_out", unsupportedRandomOp_<int64_t, const optional<Generator>&, Tensor&>);
+  m.impl("randperm.generator_out", unsupportedRandomOp_<int64_t, optional<Generator>, Tensor&>);
 
   m.impl("randint", unsupportedRandomOp<int64_t, IntArrayRef, TENSOROPTIONS>);
-  m.impl("randint.generator", unsupportedRandomOp<int64_t, IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+  m.impl("randint.generator", unsupportedRandomOp<int64_t, IntArrayRef, optional<Generator>, TENSOROPTIONS>);
   m.impl("randint.low", unsupportedRandomOp<int64_t, int64_t, IntArrayRef, TENSOROPTIONS>);
-  m.impl("randint.low_generator", unsupportedRandomOp<int64_t, int64_t, IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+  m.impl("randint.low_generator", unsupportedRandomOp<int64_t, int64_t, IntArrayRef, optional<Generator>, TENSOROPTIONS>);
   m.impl("randint.out", unsupportedRandomOp_<int64_t, IntArrayRef, Tensor&>);
-  m.impl("randint.generator_out", unsupportedRandomOp_<int64_t, IntArrayRef, const optional<Generator>&, Tensor&>);
+  m.impl("randint.generator_out", unsupportedRandomOp_<int64_t, IntArrayRef, optional<Generator>, Tensor&>);
   m.impl("randint.low_out", unsupportedRandomOp_<int64_t, int64_t, IntArrayRef, Tensor&>);
-  m.impl("randint.low_generator_out", unsupportedRandomOp_<int64_t, int64_t, IntArrayRef, const optional<Generator>&, Tensor&>);
+  m.impl("randint.low_generator_out", unsupportedRandomOp_<int64_t, int64_t, IntArrayRef, optional<Generator>, Tensor&>);
 
-  m.impl("uniform_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
+  m.impl("uniform_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);
 
 #undef TENSOROPTIONS
 }
diff --git a/aten/src/ATen/core/Generator.h b/aten/src/ATen/core/Generator.h
index 444755718f83..8b00024be303 100644
--- a/aten/src/ATen/core/Generator.h
+++ b/aten/src/ATen/core/Generator.h
@@ -149,7 +149,7 @@ Generator make_generator(Args&&... args) {
  * the backend generator type (CPU/CUDAGeneratorImpl etc.)
  */
 template <typename T>
-static inline T * check_generator(const std::optional<Generator>& gen) {
+static inline T * check_generator(c10::optional<Generator> gen) {
   TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
   TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
   TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
@@ -163,7 +163,7 @@ static inline T * check_generator(const std::optional<Generator>& gen) {
  * the backend generator type (CPU/CUDAGeneratorImpl etc.)
  */
 template <typename T>
-static inline T* get_generator_or_default(const std::optional<Generator>& gen, const Generator& default_gen) {
+static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) {
   return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
 }
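Context for readers (not part of the patch): a minimal sketch of how the two helpers above are consumed, assuming a CPU kernel; `example_kernel` is a hypothetical name, but the helper calls mirror the real call sites that appear later in this diff (cpu/DistributionKernels.cpp and the .cu files).

```cpp
// Sketch only; example_kernel is hypothetical, not PyTorch code.
#include <ATen/CPUGeneratorImpl.h>
#include <ATen/core/Generator.h>

void example_kernel(c10::optional<at::Generator> gen) {
  // Resolve the user-supplied optional generator to the backend impl,
  // falling back to the global default CPU generator on nullopt.
  // check_generator<T>() then TORCH_CHECKs the device type and downcasts.
  at::CPUGeneratorImpl* rng = at::get_generator_or_default<at::CPUGeneratorImpl>(
      gen, at::detail::getDefaultCPUGenerator());
  (void)rng;  // draw numbers while holding rng->mutex_ (see note further down)
}
```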
diff --git a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
index 35863a5a590f..33e910591de0 100644
--- a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
+++ b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
@@ -89,7 +89,7 @@ namespace detail {
       ts = ts | gen.key_set();
     }
   }
-  void operator()(const std::optional<at::Generator>& gen) {
+  void operator()(const c10::optional<at::Generator>& gen) {
     if (gen.has_value() && gen->defined()) {
       ts = ts | gen->key_set();
     }
diff --git a/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp b/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp
index b7d5472b310c..44ca2802bf3a 100644
--- a/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp
+++ b/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp
@@ -303,8 +303,8 @@ static std::tuple<Tensor, optional<int64_t>> log_sigmoid_backward_batch_rule(
   return std::make_tuple(at::log_sigmoid_backward(out_grad, out_self, out_buffer), 0);
 }
 
-static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, const std::optional<Generator>& gen) {
-  return at::binomial(count, prob.contiguous(), gen); // Bug in PyTorch, prob shouldn't need to be contiguous
+static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen) {
+  return at::binomial(count, prob.contiguous(), std::move(gen)); // Bug in PyTorch, prob shouldn't need to be contiguous
 }
 
 TORCH_LIBRARY_IMPL(aten, FuncTorchVmapMode, m) {
diff --git a/aten/src/ATen/functorch/BatchRulesRandomness.cpp b/aten/src/ATen/functorch/BatchRulesRandomness.cpp
index ada05422fc41..00d3e1d25b2e 100644
--- a/aten/src/ATen/functorch/BatchRulesRandomness.cpp
+++ b/aten/src/ATen/functorch/BatchRulesRandomness.cpp
@@ -58,7 +58,7 @@ Tensor& random_inplace_batching_rule(Tensor& self, ExtraArgs... extra_args) {
   }
 }
 
-static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
   auto maybe_layer = maybeCurrentDynamicLayer();
   auto cur_level = maybe_layer->layerId();
@@ -94,11 +94,11 @@ static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor
     "If this is necessary for your usage, please file an issue with functorch.");
   if (randomness == RandomnessType::Same && self_bdim) {
     auto intermediate = empty(self.sizes(), self.options());
-    intermediate.bernoulli_(other_, gen);
+    intermediate.bernoulli_(other_, std::move(gen));
     self.copy_(intermediate); // batching should make this just work out...
     return self;
   } else {
-    self_.bernoulli_(other_, gen);
+    self_.bernoulli_(other_, std::move(gen));
     return self;
   }
 }
@@ -213,7 +213,7 @@ static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tens
   return std::make_tuple(output, mask);
 }
 
-static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const std::optional<Generator>& generator) {
+static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const c10::optional<Generator> generator) {
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
   auto maybe_layer = maybeCurrentDynamicLayer();
   const auto cur_level = maybe_layer->layerId();
diff --git a/aten/src/ATen/native/Activation.cpp b/aten/src/ATen/native/Activation.cpp
index dfcea098cc00..7f5c696d1f6e 100644
--- a/aten/src/ATen/native/Activation.cpp
+++ b/aten/src/ATen/native/Activation.cpp
@@ -573,7 +573,7 @@ inline void _rrelu_with_noise_train(
     const Tensor& noise,
     const Scalar& lower_,
     const Scalar& upper_,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   using opmath_t = at::opmath_type<scalar_t>;
   opmath_t lower = lower_.to<opmath_t>();
   opmath_t upper = upper_.to<opmath_t>();
@@ -604,7 +604,7 @@ Tensor& rrelu_with_noise_out_cpu(const Tensor& self,
     const Scalar& lower,
     const Scalar& upper,
     bool training,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     Tensor& output) {
   if (training) {
     AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::BFloat16, self.scalar_type(), "rrelu_with_noise_out_cpu", [&] {
@@ -626,10 +626,10 @@ Tensor rrelu_with_noise_cpu(
     const Scalar& lower,
     const Scalar& upper,
     bool training,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
   return at::native::rrelu_with_noise_out_cpu(
-      self, noise, lower, upper, training, generator, output);
+      self, noise, lower, upper, training, std::move(generator), output);
 }
 
 Tensor& rrelu_with_noise_cpu_(
@@ -638,9 +638,9 @@ Tensor& rrelu_with_noise_cpu_(
     const Scalar& lower,
     const Scalar& upper,
     bool training,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   return at::native::rrelu_with_noise_out_cpu(
-      self, noise, lower, upper, training, generator, self);
+      self, noise, lower, upper, training, std::move(generator), self);
 }
 
 Tensor rrelu_with_noise_backward(
@@ -661,14 +661,14 @@ Tensor rrelu_with_noise_backward(
   }
 }
 
-Tensor rrelu(const Tensor & self, const Scalar& lower, const Scalar& upper, bool training, const std::optional<Generator>& generator) {
+Tensor rrelu(const Tensor & self, const Scalar& lower, const Scalar& upper, bool training, c10::optional<Generator> generator) {
   TORCH_CHECK(lower.to<double>() <= upper.to<double>(), "Lower bound should be less than or equal to the upper bound")
-  return at::rrelu_with_noise(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, generator);
+  return at::rrelu_with_noise(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, std::move(generator));
 }
 
-Tensor & rrelu_(Tensor & self, const Scalar& lower, const Scalar& upper, bool training, const std::optional<Generator>& generator) {
+Tensor & rrelu_(Tensor & self, const Scalar& lower, const Scalar& upper, bool training, c10::optional<Generator> generator) {
   TORCH_CHECK(lower.to<double>() <= upper.to<double>(), "Lower bound should be less than or equal to the upper bound")
-  return at::rrelu_with_noise_(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, generator);
+  return at::rrelu_with_noise_(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, std::move(generator));
 }
 
 TORCH_IMPL_FUNC(threshold_out)(const Tensor& self, const Scalar& threshold, const Scalar& value, const Tensor& result) {
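A note on the recurring signature change (commentary, not part of the patch): `c10::optional<Generator>` wraps a refcounted handle, so taking it by value and `std::move`-ing it down the call chain makes the parameter a "sink" argument — an rvalue is moved through every layer instead of being copied (with a refcount bump) wherever a callee needs ownership, which the old `const std::optional<Generator>&` forced. A self-contained sketch of that pattern with `std::optional` and a toy copy/move-logging type (illustration only; `Handle`, `inner`, `outer` are hypothetical):

```cpp
#include <iostream>
#include <optional>
#include <utility>

struct Handle {  // stand-in for a refcounted Generator handle
  Handle() = default;
  Handle(const Handle&) { std::cout << "copy (refcount bump)\n"; }
  Handle(Handle&&) noexcept { std::cout << "move (no bump)\n"; }
};

// By-value "sink" parameters, mirroring the patch: each layer forwards
// ownership with std::move instead of copying out of a const reference.
void inner(std::optional<Handle> h) { (void)h; }
void outer(std::optional<Handle> h) { inner(std::move(h)); }

int main() {
  outer(Handle{});  // prints "move" twice: no copies for an rvalue argument
  return 0;
}
```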
diff --git a/aten/src/ATen/native/DistributionTemplates.h b/aten/src/ATen/native/DistributionTemplates.h
index 00e7704cdc8c..a5ed9526c270 100644
--- a/aten/src/ATen/native/DistributionTemplates.h
+++ b/aten/src/ATen/native/DistributionTemplates.h
@@ -81,7 +81,7 @@ int64_t update_to(int64_t to) {
 }
 
 template<template<typename> class random_kernel, typename RNG>
-at::Tensor& random_impl(at::Tensor& self, const std::optional<Generator>& generator) {
+at::Tensor& random_impl(at::Tensor& self, c10::optional<Generator> generator) {
   CHECK_EMPTY_AND_RETURN(self);
   auto iter = at::TensorIterator::borrowing_nullary_op(self);
   random_kernel<RNG>()(iter, generator);
@@ -132,7 +132,7 @@ static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMet
 
 template<template<typename> class random_from_to_kernel, typename RNG>
-at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, const std::optional<Generator>& generator) {
+at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> generator) {
   uint64_t range = 0;
   auto iter = at::TensorIterator::borrowing_nullary_op(self);
   if (to_opt.has_value()) {
@@ -200,7 +200,7 @@ at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<in
 #define CHECK_NORMAL_STD(std) \
   TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_impl_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   CHECK_NORMAL_STD(std);
   CHECK_EMPTY_AND_RETURN(self);
@@ -216,7 +216,7 @@ Tensor& normal_impl_(Tensor& self, double mean, double std, const std::optional<
 }
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional<Generator> gen) {
   CHECK_NORMAL_STD(std);
   auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
   auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
@@ -227,7 +227,7 @@ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, const st
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
   CHECK_NORMAL_TENSOR_STD(std);
   auto mean_tensor = at::full({}, mean, output.options());
   auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
@@ -242,7 +242,7 @@ Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, const st
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
   CHECK_NORMAL_TENSOR_STD(std);
   auto shape = at::infer_size(mean.sizes(), std.sizes());
   at::native::resize_output(output, shape);
@@ -256,7 +256,7 @@ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor normal_impl(const Tensor& mean, double std, const std::optional<Generator>& gen) {
+Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
   CHECK_NORMAL_STD(std);
   Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
   normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
@@ -264,7 +264,7 @@ Tensor normal_impl(const Tensor& mean, double std, const std::optional<Generator
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor normal_impl(double mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
   CHECK_NORMAL_TENSOR_STD(std);
   Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
   normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
@@ -272,7 +272,7 @@ Tensor normal_impl(double mean, const Tensor& std, const std::optional<Generator
 
 template<template<typename> class normal_kernel, typename RNG>
-Tensor normal_impl(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
   CHECK_NORMAL_TENSOR_STD(std);
   auto shape = at::infer_size(mean.sizes(), std.sizes());
   Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
@@ -283,7 +283,7 @@ Tensor normal_impl(const Tensor& mean, const Tensor& std, const std::optional<Ge
 
 template<template<typename> class uniform_kernel, typename RNG>
-at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, const std::optional<Generator>& generator) {
+at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
   if (self.is_complex()) {
     CHECK_EMPTY_AND_RETURN(self);
     auto float_tensor = at::view_as_real(self);
@@ -313,7 +313,7 @@ at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, const std::o
 // ================================================== LogNormal =======================================================
 
 template<template<typename> class log_normal_kernel, typename RNG>
-at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
   CHECK_EMPTY_AND_RETURN(self);
   auto iter = TensorIterator::borrowing_nullary_op(self);
@@ -324,7 +324,7 @@ at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, const st
 // =================================================== Geometric ======================================================
 
 template<template<typename> class geometric_kernel, typename RNG>
-Tensor& geometric_impl_(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
   TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
   CHECK_EMPTY_AND_RETURN(self);
   auto iter = TensorIterator::borrowing_nullary_op(self);
@@ -335,7 +335,7 @@ Tensor& geometric_impl_(Tensor& self, double p, const std::optional<Generator>&
 // ================================================== Exponential =====================================================
 
 template<template<typename> class exponential_kernel, typename RNG>
-Tensor& exponential_impl_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
+Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
   TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
   CHECK_EMPTY_AND_RETURN(self);
   auto iter = TensorIterator::borrowing_nullary_op(self);
@@ -346,7 +346,7 @@ Tensor& exponential_impl_(Tensor& self, double lambda, const std::optional<Gener
 // ==================================================== Cauchy ========================================================
 
 template<template<typename> class cauchy_kernel, typename RNG>
-Tensor& cauchy_impl_(Tensor& self, double median, double sigma, const std::optional<Generator>& gen) {
+Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
   // TODO: instead of variable name 'sigma', use 'gamma' or 'scale'
   // the variance, squared sigma, is undefined for cauchy distribution
   TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
@@ -360,7 +360,7 @@ Tensor& cauchy_impl_(Tensor& self, double median, double sigma, const std::optio
 // ==================================================== Bernoulli =====================================================
 
 template<template<typename> class bernoulli_tensor_kernel, typename RNG>
-Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
   CHECK_EMPTY_AND_RETURN(self);
   NoNamesGuard guard;
   at::assert_no_internal_overlap(self);
@@ -369,7 +369,7 @@ Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, const std::optional<Gene
 
 template<template<typename> class bernoulli_scalar_kernel, typename RNG>
-Tensor& bernoulli_impl_(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
   TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
   CHECK_EMPTY_AND_RETURN(self);
   at::assert_no_internal_overlap(self);
@@ -378,7 +378,7 @@ Tensor& bernoulli_impl_(Tensor& self, double p, const std::optional<Generator>&
 
 template<template<typename> class bernoulli_tensor_kernel, typename RNG>
-Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, const std::optional<Generator>& gen) {
+Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
   // result.resize_as_(self) requires self to have same dtype as result, so we
   // use resize_ instead.
   // TODO: Fix resize_as_. See pytorch/pytorch#11665.
diff --git a/aten/src/ATen/native/Distributions.cpp b/aten/src/ATen/native/Distributions.cpp
index 408871946cf5..4d4eb2efaf40 100644
--- a/aten/src/ATen/native/Distributions.cpp
+++ b/aten/src/ATen/native/Distributions.cpp
@@ -160,96 +160,96 @@ DEFINE_DISPATCH(random_full_64_bits_range_stub);
 
 template<typename RNG>
 struct BernoulliStub {
-  void operator()(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+  void operator()(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
     bernoulli_tensor_stub(self.device().type(), self, p_, gen);
   }
-  void operator()(Tensor& self, double p, const std::optional<Generator>& gen) {
+  void operator()(Tensor& self, double p, c10::optional<Generator> gen) {
     bernoulli_scalar_stub(self.device().type(), self, p, gen);
   }
 };
 
-Tensor bernoulli(const Tensor& self, const std::optional<Generator>& gen) {
+Tensor bernoulli(const Tensor& self, c10::optional<Generator> gen) {
   Tensor result = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
-  result.bernoulli_(self, gen);
+  result.bernoulli_(self, std::move(gen));
   return result;
 }
 
-Tensor bernoulli(const Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor bernoulli(const Tensor& self, double p, c10::optional<Generator> gen) {
   Tensor result = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
-  result.bernoulli_(p, gen);
+  result.bernoulli_(p, std::move(gen));
   return result;
 }
 
-Tensor& bernoulli_out(const Tensor& self, const std::optional<Generator>& gen, Tensor& result) {
-  return at::native::templates::bernoulli_out_impl<BernoulliStub, Generator>(result, self, gen);
+Tensor& bernoulli_out(const Tensor& self, c10::optional<Generator> gen, Tensor& result) {
+  return at::native::templates::bernoulli_out_impl<BernoulliStub, Generator>(result, self, std::move(gen));
 }
 
-Tensor& bernoulli_(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
-  return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p_, gen);
+Tensor& bernoulli_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
+  return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p_, std::move(gen));
 }
 
-Tensor& bernoulli_(Tensor& self, double p, const std::optional<Generator>& gen) {
-  return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p, gen);
+Tensor& bernoulli_(Tensor& self, double p, c10::optional<Generator> gen) {
+  return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p, std::move(gen));
 }
 
 // ================================================== LogNormal =======================================================
 
 template<typename RNG>
 struct LogNormalStub {
-  void operator()(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
     log_normal_stub(iter.device_type(), iter, mean, std, gen);
   }
 };
 
-Tensor& log_normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
-  return at::native::templates::log_normal_impl_<LogNormalStub, Generator>(self, mean, std, gen);
+Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+  return at::native::templates::log_normal_impl_<LogNormalStub, Generator>(self, mean, std, std::move(gen));
 }
 
 // ==================================================== Cauchy ========================================================
 
 template<typename RNG>
 struct CauchyStub {
-  void operator()(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
     cauchy_stub(iter.device_type(), iter, median, sigma, gen);
   }
 };
 
-Tensor& cauchy_(Tensor& self, double median, double sigma, const std::optional<Generator>& gen) {
-  return at::native::templates::cauchy_impl_<CauchyStub, Generator>(self, median, sigma, gen);
+Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
+  return at::native::templates::cauchy_impl_<CauchyStub, Generator>(self, median, sigma, std::move(gen));
 }
 
 // ================================================== Exponential =====================================================
 
 template<typename RNG>
 struct ExponentialStub {
-  void operator()(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
     exponential_stub(iter.device_type(), iter, lambda, gen);
   }
 };
 
-Tensor& exponential_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
-  return at::native::templates::exponential_impl_<ExponentialStub, Generator>(self, lambda, gen);
+Tensor& exponential_(Tensor& self, double lambda, c10::optional<Generator> gen) {
+  return at::native::templates::exponential_impl_<ExponentialStub, Generator>(self, lambda, std::move(gen));
 }
 
 // =================================================== Geometric ======================================================
 
 template<typename RNG>
 struct GeometricStub {
-  void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
    geometric_stub(iter.device_type(), iter, p, gen);
   }
 };
 
-Tensor& geometric_(Tensor& self, double p, const std::optional<Generator>& gen) {
-  return at::native::templates::geometric_impl_<GeometricStub, Generator>(self, p, gen);
+Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
+  return at::native::templates::geometric_impl_<GeometricStub, Generator>(self, p, std::move(gen));
 }
 
 // ==================================================== Uniform =======================================================
 
 template<typename RNG>
 struct UniformStub {
-  void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
     uniform_stub(iter.device_type(), iter, from, to, gen);
   }
 };
@@ -257,23 +257,23 @@ struct UniformStub {
 template<typename RNG>
 struct UniformMeta {
   // No-op!
-  void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
   }
 };
 
-Tensor& uniform_(Tensor& self, double from, double to, const std::optional<Generator>& gen) {
-  return at::native::templates::uniform_impl_<UniformStub, Generator>(self, from, to, gen);
+Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
+  return at::native::templates::uniform_impl_<UniformStub, Generator>(self, from, to, std::move(gen));
 }
 
-Tensor& uniform_meta_(Tensor& self, double from, double to, const std::optional<Generator>& gen) {
-  return at::native::templates::uniform_impl_<UniformMeta, Generator>(self, from, to, gen);
+Tensor& uniform_meta_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
+  return at::native::templates::uniform_impl_<UniformMeta, Generator>(self, from, to, std::move(gen));
 }
 
 // ==================================================== Normal ========================================================
 
 template<typename RNG>
 struct NormalStub {
-  void operator()(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
     normal_stub(self.device().type(), self, mean, std, gen);
   }
 };
@@ -281,76 +281,76 @@ struct NormalStub {
 template<typename RNG>
 struct NormalMeta {
   // No-op!
-  void operator()(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   }
 };
 
 // inplace
-Tensor& normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl_<NormalStub, Generator>(self, mean, std, gen);
+Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+  return at::native::templates::normal_impl_<NormalStub, Generator>(self, mean, std, std::move(gen));
 }
 
-Tensor& normal_meta_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl_<NormalMeta, Generator>(self, mean, std, gen);
+Tensor& normal_meta_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
  return at::native::templates::normal_impl_<NormalMeta, Generator>(self, mean, std, std::move(gen));
 }
 
 // out tensor float
-Tensor& normal_out(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& output) {
-  return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, gen);
+Tensor& normal_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
+  return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, std::move(gen));
 }
 
-Tensor& normal_out_meta(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& output) {
-  return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, gen);
+Tensor& normal_out_meta(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
+  return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, std::move(gen));
 }
 
 // out float tensor
-Tensor& normal_out(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-  return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, gen);
+Tensor& normal_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+  return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, std::move(gen));
 }
 
-Tensor& normal_out_meta(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-  return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, gen);
+Tensor& normal_out_meta(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+  return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, std::move(gen));
 }
 
 // out tensor tensor
-Tensor& normal_out(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-  return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, gen);
+Tensor& normal_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+  return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, std::move(gen));
 }
 
-Tensor& normal_out_meta(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-  return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, gen);
+Tensor& normal_out_meta(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+  return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, std::move(gen));
 }
 
 // functional tensor float
-Tensor normal(const Tensor& mean, double std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, gen);
+Tensor normal(const Tensor& mean, double std, c10::optional<Generator> gen) {
+  return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, std::move(gen));
 }
 
-Tensor normal_meta(const Tensor& mean, double std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, gen);
+Tensor normal_meta(const Tensor& mean, double std, c10::optional<Generator> gen) {
+  return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, std::move(gen));
 }
 
 // functional float tensor
-Tensor normal(double mean, const Tensor& std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, gen);
+Tensor normal(double mean, const Tensor& std, c10::optional<Generator> gen) {
+  return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, std::move(gen));
 }
 
-Tensor normal_meta(double mean, const Tensor& std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, gen);
+Tensor normal_meta(double mean, const Tensor& std, c10::optional<Generator> gen) {
  return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, std::move(gen));
 }
 
 // functional tensor tensor
-Tensor normal(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, gen);
+Tensor normal(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+  return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, std::move(gen));
 }
 
-Tensor normal_meta(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
-  return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, gen);
+Tensor normal_meta(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+  return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, std::move(gen));
 }
 
 // functional variant, only used by the functionalization pass.
-Tensor normal_functional(const Tensor& self, double mean, double std, const std::optional<Generator>& generator) {
+Tensor normal_functional(const Tensor& self, double mean, double std, c10::optional<Generator> generator) {
   return self.clone().normal_(mean, std, std::move(generator));
 }
 
@@ -358,44 +358,44 @@ Tensor normal_functional(const Tensor& self, double mean, double std, const std:
 
 template<typename RNG>
 struct RandomStub {
-  void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
     random_stub(iter.device_type(), iter, gen);
   }
 };
 
-Tensor& random_(Tensor& self, const std::optional<Generator>& gen) {
-  return at::native::templates::random_impl<RandomStub, Generator>(self, gen);
+Tensor& random_(Tensor& self, c10::optional<Generator> gen) {
+  return at::native::templates::random_impl<RandomStub, Generator>(self, std::move(gen));
 }
 
 template<typename RNG>
 struct RandomFromToStub {
-  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t from, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t from, c10::optional<Generator> gen) {
     random_from_to_stub(iter.device_type(), iter, range, from, gen);
   }
-  void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
     random_full_64_bits_range_stub(iter.device_type(), iter, gen);
   }
 };
 
-Tensor& random_(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& gen) {
-  return at::native::templates::random_from_to_impl<RandomFromToStub, Generator>(self, from, to, gen);
+Tensor& random_(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> gen) {
+  return at::native::templates::random_from_to_impl<RandomFromToStub, Generator>(self, from, to, std::move(gen));
 }
 
-Tensor& random_(Tensor& self, int64_t to, const std::optional<Generator>& gen) {
-  return random_(self, 0, to, gen);
+Tensor& random_(Tensor& self, int64_t to, c10::optional<Generator> gen) {
+  return random_(self, 0, to, std::move(gen));
 }
 
-Tensor& random_meta_(Tensor& self, const std::optional<Generator>& gen) {
+Tensor& random_meta_(Tensor& self, c10::optional<Generator> gen) {
   // No error checking yay
   return self;
 }
 
-Tensor& random_meta_(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& gen) {
+Tensor& random_meta_(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> gen) {
   // No error checking yay
   return self;
 }
 
-Tensor& random_meta_(Tensor& self, int64_t to, const std::optional<Generator>& gen) {
+Tensor& random_meta_(Tensor& self, int64_t to, c10::optional<Generator> gen) {
   // No error checking yay
   return self;
 }
@@ -437,7 +437,7 @@ Tensor _dirichlet_grad_cpu(const Tensor& x, const Tensor& alpha, const Tensor& t
  * This section is a counterpart to Distributions.cu
  */
 
-Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, const std::optional<Generator>& gen) {
+Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen) {
   Tensor ret = at::zeros(count.sizes(), count.options());
   auto iter = TensorIteratorConfig()
     .add_output(ret)
@@ -462,7 +462,7 @@ Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, const std::optio
   return ret;
 }
 
-Tensor _s_poisson_cpu(const Tensor& lambda, const std::optional<Generator>& gen) {
+Tensor _s_poisson_cpu(const Tensor& lambda, c10::optional<Generator> gen) {
   Tensor ret = at::zeros(lambda.sizes(), lambda.options());
   auto iter = TensorIteratorConfig()
     .add_output(ret)
@@ -479,7 +479,7 @@ Tensor _s_poisson_cpu(const Tensor& lambda, const std::optional<Generator>& gen)
   return ret;
 }
 
-Tensor _s_gamma_cpu(const Tensor& alpha, const std::optional<Generator>& gen) {
+Tensor _s_gamma_cpu(const Tensor& alpha, c10::optional<Generator> gen) {
   Tensor ret = at::zeros(alpha.sizes(), alpha.options());
   auto iter = TensorIteratorConfig()
     .add_output(ret)
@@ -509,7 +509,7 @@ Tensor _s_gamma_cpu(const Tensor& alpha, const std::optional<Generator>& gen) {
   return ret;
 }
 
-Tensor _s_dirichlet_cpu(const Tensor& alpha, const std::optional<Generator>& gen) {
+Tensor _s_dirichlet_cpu(const Tensor& alpha, c10::optional<Generator> gen) {
   Tensor ret = at::zeros(alpha.sizes(), alpha.options());
   AT_DISPATCH_FLOATING_TYPES(ret.scalar_type(), "dirichlet", [&] {
     Tensor gamma = at::zeros(alpha.sizes(), alpha.options().dtype(ScalarType::Double));
@@ -562,7 +562,7 @@ constexpr int64_t FLOAT32_MAX_CONSECUTIVE_INT = 1 << (FLT_MANT_DIG);
 Tensor& multinomial_out(const Tensor& self,
     int64_t n_sample,
     bool with_replacement,
-    const std::optional<Generator>& gen,
+    c10::optional<Generator> gen,
     Tensor& result) {
   TORCH_CHECK(
       result.device() == self.device(),
@@ -622,7 +622,7 @@ Tensor& multinomial_out(const Tensor& self,
     // s = argmax( p / (-log(eps)) ) where eps ~ U(0, 1).
     // We can also simplify the formula above by
     // s = argmax( p / q ) where q ~ Exp(1)
-    Tensor q = at::empty_like(self).exponential_(1, gen);
+    Tensor q = at::empty_like(self).exponential_(1, std::move(gen));
     // In theory the probability to generate 0 from exponential distribution is
     // 0. However, on CUDA side there is a protection to avoid 0s, but on CPU
     // side, there is a very low probability to generate 0 from
@@ -647,9 +647,9 @@ Tensor multinomial(
     const Tensor& self,
     int64_t n_sample,
     bool with_replacement,
-    const std::optional<Generator>& gen) {
+    c10::optional<Generator> gen) {
   Tensor result = at::empty({0}, self.options().dtype(kLong));
-  native::multinomial_out(self, n_sample, with_replacement, gen, result);
+  native::multinomial_out(self, n_sample, with_replacement, std::move(gen), result);
   return result;
 }
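Side note (not part of the patch): the comments in `multinomial_out` above rely on the identity `s = argmax(p / q)`, `q ~ Exp(1)`, which samples index `i` with probability proportional to `p_i` (the "exponential race" / Gumbel-max trick). A standalone sketch that checks the identity empirically, in plain C++ rather than ATen:

```cpp
#include <cstdio>
#include <random>
#include <vector>

int main() {
  std::vector<double> p = {0.1, 0.2, 0.7};
  std::mt19937 rng(0);
  std::exponential_distribution<double> q(1.0);  // q ~ Exp(1)
  std::vector<int> counts(p.size(), 0);
  const int trials = 100000;
  for (int t = 0; t < trials; ++t) {
    std::size_t best = 0;
    double best_val = -1.0;
    for (std::size_t i = 0; i < p.size(); ++i) {
      double val = p[i] / q(rng);  // s = argmax( p / q )
      if (val > best_val) { best_val = val; best = i; }
    }
    ++counts[best];
  }
  // Empirical frequencies should approach p = {0.1, 0.2, 0.7}.
  for (std::size_t i = 0; i < p.size(); ++i)
    std::printf("p=%.1f -> empirical %.3f\n", p[i], counts[i] / double(trials));
  return 0;
}
```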
diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp
index bbe5311b8051..add09e2f81e6 100644
--- a/aten/src/ATen/native/TensorFactories.cpp
+++ b/aten/src/ATen/native/TensorFactories.cpp
@@ -878,10 +878,10 @@ Tensor rand(IntArrayRef size, c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
     c10::optional<bool> pin_memory) {
-  return native::rand(size, static_cast<const std::optional<Generator>&>(c10::nullopt), dtype, layout, device, pin_memory);
+  return native::rand(size, static_cast<c10::optional<Generator>>(c10::nullopt), dtype, layout, device, pin_memory);
 }
 
-Tensor rand(IntArrayRef size, const std::optional<Generator>& generator,
+Tensor rand(IntArrayRef size, c10::optional<Generator> generator,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
@@ -897,7 +897,7 @@ Tensor& rand_out(IntArrayRef size, Tensor& result) {
   return native::rand_out(size, c10::nullopt, result);
 }
 
-Tensor& rand_out(IntArrayRef size, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& rand_out(IntArrayRef size, c10::optional<Generator> generator, Tensor& result) {
   result.resize_(size);
   return result.uniform_(0, 1, std::move(generator));
 }
@@ -929,7 +929,7 @@ Tensor randint(int64_t high, IntArrayRef size,
 
 Tensor randint(
     int64_t high,
     IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
@@ -952,7 +952,7 @@ Tensor randint(
     int64_t low,
     int64_t high,
     IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
@@ -970,7 +970,7 @@ Tensor& randint_out(int64_t high, IntArrayRef size, Tensor& result) {
 
 Tensor& randint_out(int64_t high,
     IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     Tensor& result) {
   result.resize_(size);
   return result.random_(0, high, std::move(generator));
@@ -983,7 +983,7 @@ Tensor& randint_out(int64_t low, int64_t high, IntArrayRef size, Tensor& result)
 
 Tensor& randint_out(int64_t low,
     int64_t high,
     IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     Tensor& result) {
   result.resize_(size);
   return result.random_(low, high, std::move(generator));
@@ -1027,10 +1027,10 @@ Tensor randn(IntArrayRef size,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
     c10::optional<bool> pin_memory) {
-  return native::randn(size, static_cast<const std::optional<Generator>&>(c10::nullopt), dtype, layout, device, pin_memory);
+  return native::randn(size, static_cast<c10::optional<Generator>>(c10::nullopt), dtype, layout, device, pin_memory);
 }
 
-Tensor randn(IntArrayRef size, const std::optional<Generator>& generator,
+Tensor randn(IntArrayRef size, c10::optional<Generator> generator,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
@@ -1046,13 +1046,13 @@ Tensor& randn_out(IntArrayRef size, Tensor& result) {
   return native::randn_out(size, c10::nullopt, result);
 }
 
-Tensor& randn_out(IntArrayRef size, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& randn_out(IntArrayRef size, c10::optional<Generator> generator, Tensor& result) {
   result.resize_(size);
   return result.normal_(0, 1, std::move(generator));
 }
 
 Tensor normal(double mean, double std, IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
@@ -1065,7 +1065,7 @@ Tensor normal(double mean, double std, IntArrayRef size,
 }
 
 Tensor& normal_out(double mean, double std,
-    IntArrayRef size, const std::optional<Generator>& generator, Tensor& result) {
+    IntArrayRef size, c10::optional<Generator> generator, Tensor& result) {
   result.resize_(size);
   return result.normal_(mean, std, std::move(generator));
 }
@@ -1120,7 +1120,7 @@ Tensor randperm(int64_t n,
   return native::randperm(n, c10::nullopt, dtype, layout, device, pin_memory);
 }
 
-Tensor randperm(int64_t n, const std::optional<Generator>& generator,
+Tensor randperm(int64_t n, c10::optional<Generator> generator,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
     c10::optional<Device> device,
@@ -1140,7 +1140,7 @@ Tensor& randperm_out(int64_t n, Tensor& result) {
   return at::randperm_out(result, n, c10::nullopt);
 }
 
-Tensor& randperm_out_cpu(int64_t n, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& randperm_out_cpu(int64_t n, c10::optional<Generator> generator, Tensor& result) {
   TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
   TORCH_CHECK(!generator.has_value() || (generator.has_value() && result.device() == generator->device()),
               "Expected a '", result.device(), "' generator device but found '", generator->device(), "'");
   check_supported_max_int_with_precision(n, result);
@@ -1809,7 +1809,7 @@ Tensor randn(
 
 Tensor randn(
     IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     optional<DimnameList> names,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
@@ -1834,7 +1834,7 @@ Tensor rand(
 
 Tensor rand(
     IntArrayRef size,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     optional<DimnameList> names,
     c10::optional<ScalarType> dtype,
     c10::optional<Layout> layout,
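Usage sketch (not part of the patch): the factory overloads above take the generator explicitly, which is how C++ callers get reproducible sampling independent of the global RNG state. Assuming the usual generated ATen C++ API:

```cpp
#include <ATen/ATen.h>
#include <ATen/CPUGeneratorImpl.h>

int main() {
  // An explicitly seeded CPU generator; at::Generator converts implicitly to
  // c10::optional<Generator>, which these overloads now take by value.
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  at::Tensor a = at::randn({2, 3}, gen);
  at::Tensor b = at::randint(/*high=*/10, {4}, gen,
                             at::TensorOptions().dtype(at::kLong));
  return 0;
}
```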
diff --git a/aten/src/ATen/native/UnaryOps.h b/aten/src/ATen/native/UnaryOps.h
index e791a2f8dc22..91d4d84d4630 100644
--- a/aten/src/ATen/native/UnaryOps.h
+++ b/aten/src/ATen/native/UnaryOps.h
@@ -93,23 +93,23 @@ DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k1_stub);
 DECLARE_DISPATCH(unary_fn, special_spherical_bessel_j0_stub);
 
 // NB: these are actually defined in Distribution
-DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, const std::optional<Generator>&), bernoulli_tensor_stub);
-DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const std::optional<Generator>&), bernoulli_scalar_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, const std::optional<Generator>&), cauchy_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const std::optional<Generator>&), exponential_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const std::optional<Generator>&), geometric_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, const std::optional<Generator>&), log_normal_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, const std::optional<Generator>&), uniform_stub);
-DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, const std::optional<Generator>&), normal_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, const std::optional<Generator>&), random_from_to_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const std::optional<Generator>&), random_full_64_bits_range_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const std::optional<Generator>&), random_stub);
+DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, c10::optional<Generator>), bernoulli_tensor_stub);
+DECLARE_DISPATCH(void(*)(const TensorBase&, const double, c10::optional<Generator>), bernoulli_scalar_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), cauchy_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional<Generator>), exponential_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional<Generator>), geometric_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), log_normal_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), uniform_stub);
+DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, c10::optional<Generator>), normal_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, c10::optional<Generator>), random_from_to_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional<Generator>), random_full_64_bits_range_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional<Generator>), random_stub);
 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t, const double), kaiser_window_stub);
 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t), polygamma_stub);
 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const Scalar& a, const Scalar& b), clamp_stub);
 DECLARE_DISPATCH(
-    void (*)(Tensor&, const Tensor&, int64_t, const std::optional<Generator>&),
+    void (*)(Tensor&, const Tensor&, int64_t, c10::optional<Generator>),
     multinomial_with_replacement_stub);
 DECLARE_DISPATCH(
     void (*)(
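For readers unfamiliar with the stub machinery being retyped above (commentary, not part of the patch): `DECLARE_DISPATCH` declares a per-device function-pointer table, `DEFINE_DISPATCH` defines its storage, and kernel files register implementations that are invoked via `stub(device_type, args...)`. A schematic sketch with hypothetical names (`my_fn`, `my_random_stub`, `my_random_kernel`); in a real build the `REGISTER_DISPATCH` line lives in a kernel file compiled once per CPU capability:

```cpp
#include <ATen/TensorIterator.h>
#include <ATen/core/Generator.h>
#include <ATen/native/DispatchStub.h>
#include <utility>

namespace at::native {

// Header: declare the stub's signature (compare the stubs above).
using my_fn = void (*)(TensorIteratorBase&, c10::optional<Generator>);
DECLARE_DISPATCH(my_fn, my_random_stub);

// Exactly one .cpp: define the stub's storage.
DEFINE_DISPATCH(my_random_stub);

// Kernel file (per CPU capability): register an implementation.
static void my_random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen) {
  (void)iter; (void)gen;  // a real kernel would fill `iter` using the generator
}
REGISTER_DISPATCH(my_random_stub, &my_random_kernel);

// Call site: dispatch on device type, as random_stub does above.
void call_my_random(TensorIteratorBase& iter, c10::optional<Generator> gen) {
  my_random_stub(iter.device_type(), iter, std::move(gen));
}

} // namespace at::native
```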
diff --git a/aten/src/ATen/native/cpu/DistributionKernels.cpp b/aten/src/ATen/native/cpu/DistributionKernels.cpp
index c12ee3ac9fc6..6dce481853ac 100644
--- a/aten/src/ATen/native/cpu/DistributionKernels.cpp
+++ b/aten/src/ATen/native/cpu/DistributionKernels.cpp
@@ -26,27 +26,27 @@ namespace at::native {
 namespace {
 
-static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
+static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::cauchy_kernel(iter, median, sigma, generator);
 }
 
-void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen) {
+void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::bernoulli_kernel(self, p_, generator);
 }
 
 #if !AT_MKL_ENABLED()
-void bernoulli_scalar_kernel_default(const TensorBase &self, double p, const std::optional<Generator>& gen) {
+void bernoulli_scalar_kernel_default(const TensorBase &self, double p, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::bernoulli_kernel(self, p, generator);
 }
 
-void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::optional<Generator>& gen) {
+void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
   bernoulli_scalar_kernel_default(self, p, gen);
 }
 #else
-void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::optional<Generator>& gen) {
+void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   int64_t seed;
   {
@@ -99,17 +99,17 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::option
 }
 #endif
 
-static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::exponential_kernel(iter, lambda, generator);
 }
 
 #if (!AT_MKL_ENABLED() || defined(FBCODE_CAFFE2))
-void exponential_kernel(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
   exponential_kernel_default(iter, lambda, gen);
 }
 #else
-void exponential_kernel(TensorIteratorBase &iter, double lambda, const std::optional<Generator>& gen) {
+void exponential_kernel(TensorIteratorBase &iter, double lambda, c10::optional<Generator> gen) {
   TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());
 
   Tensor self = iter.tensor(0);
@@ -195,32 +195,32 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, const std::opti
 }
 #endif
 
-static void geometric_kernel(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
+static void geometric_kernel(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::geometric_kernel(iter, p, generator);
 }
 
-static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
+static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::log_normal_kernel(iter, mean, std, generator);
 }
 
-void uniform_kernel(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::uniform_kernel(iter, from, to, generator);
 }
 
-void normal_kernel(const TensorBase &self, double mean, double std, const std::optional<Generator>& gen) {
+void normal_kernel(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::normal_kernel(self, mean, std, generator);
 }
 
-static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen) {
+static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::random_from_to_kernel(iter, range, base, generator);
 }
 
-static void random_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+static void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::random_kernel(iter, generator);
 }
@@ -228,7 +228,7 @@ static void random_kernel(TensorIteratorBase& iter, const std::optional<Generato
 // from(inclusive) = std::numeric_limits<int64_t>::lowest()
 // to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
-static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen) {
   CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
   templates::cpu::random_full_64_bits_range_kernel(iter, generator);
 }
diff --git a/aten/src/ATen/native/cpu/DistributionTemplates.h b/aten/src/ATen/native/cpu/DistributionTemplates.h
index 541929350c95..1a1039b916f8 100644
--- a/aten/src/ATen/native/cpu/DistributionTemplates.h
+++ b/aten/src/ATen/native/cpu/DistributionTemplates.h
@@ -58,10 +58,10 @@ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) {
 
 template<typename RNG>
 struct RandomFromToKernel {
-  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
     random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
   }
-  void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
     random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
   }
 };
@@ -79,7 +79,7 @@ void random_kernel(TensorIteratorBase& iter, RNG generator) {
 
 template<typename RNG>
 struct RandomKernel {
-  void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
     random_kernel(iter, check_generator<RNG>(gen));
   }
 };
@@ -200,7 +200,7 @@ void normal_kernel(const TensorBase &self, double mean, double std, RNG generato
 
 template<typename RNG>
 struct NormalKernel {
-  void operator()(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
     normal_kernel(self, mean, std, check_generator<RNG>(gen));
   }
 };
@@ -222,7 +222,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gene
 
 template<typename RNG>
 struct UniformKernel {
-  void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
     uniform_kernel(iter, from, to, check_generator<RNG>(gen));
   }
 };
@@ -242,7 +242,7 @@ void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG ge
 
 template<typename RNG>
 struct CauchyKernel {
-  void operator()(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
    cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
   }
 };
@@ -262,7 +262,7 @@ void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, RNG ge
 
 template<typename RNG>
 struct LogNormalKernel {
-  void operator()(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
     log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
   }
 };
@@ -282,7 +282,7 @@ void geometric_kernel(TensorIteratorBase& iter, double p, RNG generator) {
 
 template<typename RNG>
 struct GeometricKernel {
-  void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
     geometric_kernel(iter, p, check_generator<RNG>(gen));
   }
 };
@@ -303,7 +303,7 @@ void exponential_kernel(TensorIteratorBase& iter, double lambda, RNG generator)
 
 template<typename RNG>
 struct ExponentialKernel {
-  void operator()(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
     exponential_kernel(iter, lambda, check_generator<RNG>(gen));
   }
 };
@@ -358,10 +358,10 @@ void bernoulli_kernel(const TensorBase &self, double p, RNG generator) {
 
 template<typename RNG>
 struct BernoulliKernel {
-  void operator()(const TensorBase &self, double p, const std::optional<Generator>& gen) {
+  void operator()(const TensorBase &self, double p, c10::optional<Generator> gen) {
     bernoulli_kernel(self, p, check_generator<RNG>(gen));
   }
-  void operator()(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen) {
+  void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
    bernoulli_kernel(self, p_, check_generator<RNG>(gen));
   }
 };
     const int64_t n_sample,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   auto gen = get_generator_or_default<CPUGeneratorImpl>(
       generator, detail::getDefaultCPUGenerator());
   // See Note [Acquire lock when using random generators]
@@ -128,7 +128,7 @@ multinomial_with_replacement_apply(
     Tensor& result,
     const Tensor& self,
     const int64_t n_sample,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   auto gen = get_generator_or_default<CPUGeneratorImpl>(
       generator, detail::getDefaultCPUGenerator());
   // See Note [Acquire lock when using random generators]
@@ -230,7 +230,7 @@ static void multinomial_with_replacement_kernel_impl(
     Tensor& result,
     const Tensor& self,
     const int64_t n_sample,
-    const std::optional<Generator>& gen) {
+    c10::optional<Generator> gen) {
   AT_DISPATCH_FLOATING_TYPES_AND2(
       kHalf, kBFloat16, self.scalar_type(), "multinomial", [&] {
         multinomial_with_replacement_apply<scalar_t>(
diff --git a/aten/src/ATen/native/cuda/DistributionBernoulli.cu b/aten/src/ATen/native/cuda/DistributionBernoulli.cu
index 298a1b1cd0e2..89a518267d25 100644
--- a/aten/src/ATen/native/cuda/DistributionBernoulli.cu
+++ b/aten/src/ATen/native/cuda/DistributionBernoulli.cu
@@ -23,12 +23,12 @@
 
 namespace at::native {
 
-void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen_) {
+void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen_) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
 }
 
-void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::optional<Generator>& gen) {
+void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
   auto iter = TensorIterator::borrowing_nullary_op(self);
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::bernoulli_kernel(iter, p, generator);
diff --git a/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu b/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu
index 5e5cd012033d..a66d3cf3288f 100644
--- a/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu
@@ -5,7 +5,7 @@
 
 namespace at::native {
 
-void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
+void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu b/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu
index 3890470efdf6..76cb94f6fd87 100644
--- a/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu
@@ -5,7 +5,7 @@
 
 namespace at::native {
 
-void exponential_kernel(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu b/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu
index 092cb47c50a5..0fe49d7bbd4b 100644
--- a/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu
@@ -5,7 +5,7 @@
 
 namespace at::native {
 
-void geometric_kernel(TensorIteratorBase& iter, double p_, const std::optional<Generator>& gen) {
+void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::geometric_kernel(iter, p_, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu b/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu
index d49cadf8d545..f394d4fea39d 100644
--- a/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu
@@ -5,7 +5,7 @@
 
 namespace at::native {
 
-void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
+void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionNormal.cu b/aten/src/ATen/native/cuda/DistributionNormal.cu
index 45e4e20c22ca..a17c3e3da055 100644
--- a/aten/src/ATen/native/cuda/DistributionNormal.cu
+++ b/aten/src/ATen/native/cuda/DistributionNormal.cu
@@ -5,7 +5,7 @@
 
 namespace at::native {
 
-void normal_kernel(const TensorBase &self, double mean, double std, const std::optional<Generator>& gen) {
+void normal_kernel(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::normal_kernel(self, mean, std, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionRandomKernel.cu b/aten/src/ATen/native/cuda/DistributionRandomKernel.cu
index 734f775e0f3a..034a19c512f4 100644
--- a/aten/src/ATen/native/cuda/DistributionRandomKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionRandomKernel.cu
@@ -5,17 +5,17 @@
 
 namespace at::native {
 
-void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen_) {
+void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
 }
 
-void random_full_64_bits_range_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen_) {
+void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
 }
 
-void random_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen_) {
+void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::random_kernel(iter, gen);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionTemplates.h b/aten/src/ATen/native/cuda/DistributionTemplates.h
index 8e371386409d..04a278d83f76 100644
--- a/aten/src/ATen/native/cuda/DistributionTemplates.h
+++ b/aten/src/ATen/native/cuda/DistributionTemplates.h
@@ -352,10 +352,10 @@ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) {
 
 template<typename RNG>
 struct RandomFromToKernel {
-  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
     random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
   }
-  void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
     random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
   }
 };
@@ -448,7 +448,7 @@ void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) {
 
 template<typename RNG>
 struct NormalKernel {
-  void operator()(const TensorBase &self, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
     normal_kernel(self, mean, std, check_generator<RNG>(gen));
   }
 };
@@ -481,7 +481,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen)
 
 template<typename RNG>
 struct UniformKernel {
-  void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
     uniform_kernel(iter, from, to, check_generator<RNG>(gen));
   }
 };
@@ -504,7 +504,7 @@ void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG
 
 template<typename RNG>
 struct LogNormalKernel {
-  void operator()(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
     log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
   }
 };
@@ -525,7 +525,7 @@ void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) {
 
 template<typename RNG>
 struct GeometricKernel {
-  void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
     geometric_kernel(iter, p, check_generator<RNG>(gen));
   }
 };
@@ -548,7 +548,7 @@ void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) {
 
 template<typename RNG>
 struct ExponentialKernel {
-  void operator()(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
     exponential_kernel(iter, lambda, check_generator<RNG>(gen));
   }
 };
@@ -571,7 +571,7 @@ void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG
 
 template<typename RNG>
 struct CauchyKernel {
-  void operator()(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
     cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
   }
 };
@@ -661,10 +661,10 @@ void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) {
 
 template<typename RNG>
 struct BernoulliKernel {
-  void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
+  void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
     bernoulli_kernel(iter, p, check_generator<RNG>(gen));
  }
-  void operator()(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen) {
+  void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
     bernoulli_kernel(self, p_, check_generator<RNG>(gen));
   }
 };
diff --git a/aten/src/ATen/native/cuda/DistributionUniform.cu b/aten/src/ATen/native/cuda/DistributionUniform.cu
index b83649e9fb6a..2ebdfa446459 100644
--- a/aten/src/ATen/native/cuda/DistributionUniform.cu
+++ b/aten/src/ATen/native/cuda/DistributionUniform.cu
@@ -5,7 +5,7 @@
 
 namespace at::native {
 
-void uniform_kernel(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
   templates::cuda::uniform_kernel(iter, from, to, generator);
 }
diff --git a/aten/src/ATen/native/cuda/Distributions.cpp b/aten/src/ATen/native/cuda/Distributions.cpp
index 5804fd34cab7..c0d5abb49bf6 100644
--- a/aten/src/ATen/native/cuda/Distributions.cpp
+++ b/aten/src/ATen/native/cuda/Distributions.cpp
@@ -18,14 +18,14 @@
 
 namespace at::native {
 
-Tensor _s_poisson_cuda(const Tensor& lambda, const std::optional<Generator>& gen_) {
+Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   Tensor ret = at::empty(lambda.sizes(), lambda.options());
   launch_poisson_cuda_kernel(ret, lambda, gen);
   return ret;
 }
 
-Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, const std::optional<Generator>& gen_) {
+Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   Tensor ret = at::empty(count.sizes(), count.options());
   at::TensorIterator iter = at::TensorIteratorConfig()
@@ -37,14 +37,14 @@ Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, const std::opti
   return ret;
 }
 
-Tensor _s_gamma_cuda(const Tensor& alpha, const std::optional<Generator>& gen_) {
+Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   Tensor ret = at::empty(alpha.sizes(), alpha.options());
   launch_gamma_kernel(ret, alpha, gen);
   return ret;
 }
 
-Tensor _s_dirichlet_cuda(const Tensor& alpha, const std::optional<Generator>& gen_) {
+Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
   Tensor ret = at::empty(alpha.sizes(), alpha.options());
   launch_gamma_kernel(ret, alpha, gen);
diff --git a/aten/src/ATen/native/cuda/Dropout.cu b/aten/src/ATen/native/cuda/Dropout.cu
index 85aac0274a57..67ea3e4f832b 100644
--- a/aten/src/ATen/native/cuda/Dropout.cu
+++ b/aten/src/ATen/native/cuda/Dropout.cu
@@ -387,7 +387,7 @@ native_dropout_cuda(const Tensor& self, double p, c10::optional<bool> train){
 
 // TODO: _fused_dropout_cuda is to be removed, see PR #63937
 std::tuple<Tensor,Tensor>
-fused_dropout_cuda(const Tensor& self, double p, const std::optional<Generator>& gen_){
+fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){
  auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
  return dropout_cuda(gen, self, p);
 }
diff --git a/aten/src/ATen/native/cuda/MultinomialKernel.cu b/aten/src/ATen/native/cuda/MultinomialKernel.cu
index 01e647d04a0a..d8f142a813f8 100644
--- a/aten/src/ATen/native/cuda/MultinomialKernel.cu
+++ b/aten/src/ATen/native/cuda/MultinomialKernel.cu
@@ -328,7 +328,7 @@ void multinomial_with_replacement_kernel_impl(
     Tensor& result,
     const Tensor& self,
     const int64_t n_sample,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
 
   int inputSize = self.dim();
diff --git a/aten/src/ATen/native/cuda/Randperm.cu b/aten/src/ATen/native/cuda/Randperm.cu
index 098522723b66..c22c99dfe6a7 100644
--- a/aten/src/ATen/native/cuda/Randperm.cu
+++ b/aten/src/ATen/native/cuda/Randperm.cu
@@ -55,7 +55,7 @@ namespace {
 template <int N> struct alignas(N) OpaqueType { char data[N]; };
 }
 
-Tensor& randperm_out_cuda(int64_t n, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
   TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
 
   check_supported_max_int_with_precision(n, result);
diff --git a/aten/src/ATen/native/cuda/Randperm.cuh b/aten/src/ATen/native/cuda/Randperm.cuh
index a7c31a33b71c..de5affebb8bd 100644
--- a/aten/src/ATen/native/cuda/Randperm.cuh
+++ b/aten/src/ATen/native/cuda/Randperm.cuh
@@ -40,7 +40,7 @@ __global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T
 
 // See note [Algorithm of randperm]
 template <typename T, typename scalar_t>
-void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, const std::optional<at::Generator> &gen_) {
+void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional<at::Generator> &gen_) {
   auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(gen_, at::cuda::detail::getDefaultCUDAGenerator());
   int64_t counter_offset = n;
   at::PhiloxCudaState rng_engine_inputs;
diff --git a/aten/src/ATen/native/cuda/RreluWithNoise.cu b/aten/src/ATen/native/cuda/RreluWithNoise.cu
index 5d0e1284c45c..463a5ce00c81 100644
--- a/aten/src/ATen/native/cuda/RreluWithNoise.cu
+++ b/aten/src/ATen/native/cuda/RreluWithNoise.cu
@@ -74,7 +74,7 @@ inline void _rrelu_with_noise_cuda_train(
     const Tensor& noise_,
     const Scalar& lower_,
     const Scalar& upper_,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   auto input = input_.contiguous();
   auto noise = noise_.contiguous();
   Tensor tmp_output = output.contiguous();
@@ -142,7 +142,7 @@ Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
     const Scalar& lower,
     const Scalar& upper,
     bool training,
-    const std::optional<Generator>& generator,
+    c10::optional<Generator> generator,
     Tensor& output) {
   at::native::resize_output(output, self.sizes());
 
@@ -176,7 +176,7 @@ Tensor rrelu_with_noise_cuda(
     const Scalar& lower,
     const Scalar& upper,
     bool training,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
   return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
 }
@@ -187,7 +187,7 @@ Tensor& rrelu_with_noise_cuda_(
     const Scalar& lower,
     const Scalar& upper,
     bool training,
-    const std::optional<Generator>& generator) {
+    c10::optional<Generator> generator) {
   return at::native::rrelu_with_noise_out_cuda(
       self, noise, lower, upper, training, generator, self);
 }
diff --git a/aten/src/ATen/native/mps/operations/Distributions.mm b/aten/src/ATen/native/mps/operations/Distributions.mm
index 01437fe7f11d..7ed06c8bf437 100644
--- a/aten/src/ATen/native/mps/operations/Distributions.mm
+++ b/aten/src/ATen/native/mps/operations/Distributions.mm
@@ -52,7 +52,7 @@ Tensor& random_mps_impl(Tensor& self,
                         const c10::optional<Tensor>& mean_opt,
                         const c10::optional<Tensor>& std_opt,
                         MPSGraphRandomDistribution distribution,
-                        const std::optional<Generator>& gen,
+                        c10::optional<Generator> gen,
                         std::string op_name,
                         RandomOpBlock randomBlock) {
   if (self.numel() == 0) {
@@ -144,7 +144,7 @@ static Tensor& normal_mps_impl(Tensor& self,
                                double std_s,
                                const c10::optional<Tensor>& mean_opt,
                                const c10::optional<Tensor>& std_opt,
-                               const std::optional<Generator>& gen,
+                               c10::optional<Generator> gen,
                                std::string op_name) {
   const Tensor& std_t = *(at::borrow_from_optional_tensor(std_opt));
   const Tensor& mean_t = *(at::borrow_from_optional_tensor(mean_opt));
@@ -198,7 +198,7 @@ static Tensor& normal_mps_impl(Tensor& self,
 
 static Tensor& bernoulli_mps_impl(Tensor& self,
                                   const Tensor& prob_t,
-                                  const std::optional<Generator>& gen,
+                                  c10::optional<Generator> gen,
                                   std::string op_name) {
   TORCH_CHECK(prob_t.is_same_size(self) || prob_t.dim() == 0,
               op_name,
@@ -225,7 +225,7 @@ static Tensor& bernoulli_mps_impl(Tensor& self,
 
 } // namespace mps
 
-Tensor& uniform_mps_(Tensor& self, double from, double to, const std::optional<Generator>& gen) {
+Tensor& uniform_mps_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
   auto scalar_type = self.scalar_type();
   if (scalar_type == ScalarType::ComplexFloat)
     scalar_type = ScalarType::Float;
@@ -257,16 +257,16 @@ Tensor& uniform_mps_(Tensor& self, double from, double to, const std::optional<
 
-Tensor& normal_mps_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_mps_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   return mps::normal_mps_impl(self, mean, std, c10::nullopt, c10::nullopt, gen, "normal");
 }
 
-Tensor normal_mps(const Tensor& mean, double std, const std::optional<Generator>& gen) {
+Tensor normal_mps(const Tensor& mean, double std, c10::optional<Generator> gen) {
   Tensor self = at::empty(mean.sizes(), mean.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
   return mps::normal_mps_impl(self, 0.0, std, mean, c10::nullopt, gen, "normal");
 }
 
-Tensor normal_mps(double mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_mps(double mean, const Tensor& std, c10::optional<Generator> gen) {
   Tensor self = at::empty(std.sizes(), std.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
   // when there's no tensor-type mean, we cannot pass scalar mean value due to the order of
   // multiply/add ops in random computation. So we create a mean tensor instead.
@@ -274,45 +274,45 @@ Tensor normal_mps(double mean, const Tensor& std, const std::optional<Generator>
   return mps::normal_mps_impl(self, 0.0, 1.0, mean_t, std, gen, "normal");
 }
 
-Tensor normal_mps(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_mps(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
   auto shape = at::infer_size(mean.sizes(), std.sizes());
   Tensor self = at::empty(shape, mean.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
   return mps::normal_mps_impl(self, 0.0, 1.0, mean, std, gen, "normal");
 }
 
-Tensor& normal_mps_out(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& self) {
+Tensor& normal_mps_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& self) {
   return mps::normal_mps_impl(self, 0.0, std, mean, c10::nullopt, gen, "normal");
 }
 
-Tensor& normal_mps_out(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& self) {
+Tensor& normal_mps_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& self) {
   // when there's no tensor-type mean, we cannot pass scalar mean value due to the order of
   // multiply/add ops in random computation. So we create a mean tensor instead.
   Tensor mean_t = at::full_like(self, Scalar(mean));
   return mps::normal_mps_impl(self, 0.0, 1.0, mean_t, std, gen, "normal");
 }
 
-Tensor& normal_mps_out(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& self) {
+Tensor& normal_mps_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& self) {
   TORCH_CHECK(mean.numel() == std.numel(), "normal_mps_out: mean and std must have same number of elements")
   return mps::normal_mps_impl(self, 0.0, 1.0, mean, std, gen, "normal");
 }
 
-Tensor& bernoulli_out_mps(const Tensor& p_, const std::optional<Generator>& gen, Tensor& result) {
+Tensor& bernoulli_out_mps(const Tensor& p_, c10::optional<Generator> gen, Tensor& result) {
   result.resize_(p_.sizes());
   return mps::bernoulli_mps_impl(result, p_, gen, __func__);
 }
 
-Tensor& bernoulli_mps_(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& bernoulli_mps_(Tensor& self, double p, c10::optional<Generator> gen) {
   TORCH_CHECK(0.0 <= p && p <= 1.0, "bernoulli_mps_ expects p to be in [0, 1], but got p=", p);
   Tensor prob_t = at::full({}, Scalar(p), c10::TensorOptions().dtype(kFloat).device(kMPS));
   return mps::bernoulli_mps_impl(self, prob_t, gen, __func__);
 }
 
-Tensor& bernoulli_mps_(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+Tensor& bernoulli_mps_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
   return mps::bernoulli_mps_impl(self, p_, gen, __func__);
 }
 
 // random_.from
-Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, const std::optional<Generator>& gen) {
+Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> gen) {
   auto input_dtype = self.scalar_type();
   int64_t to = 0;
 
@@ -372,16 +372,16 @@ Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c
       self, from, to - 1, c10::nullopt, c10::nullopt, MPSGraphRandomDistributionUniform, gen, __func__, nullptr);
 }
 
-Tensor& random_mps_(Tensor& self, int64_t to, const std::optional<Generator>& gen) {
+Tensor& random_mps_(Tensor& self, int64_t to, c10::optional<Generator> gen) {
   return random_mps_(self, 0, to, gen);
 }
 
-Tensor& random_mps_(Tensor& self, const std::optional<Generator>& gen) {
+Tensor& random_mps_(Tensor& self, c10::optional<Generator> gen) {
   return random_mps_(self, 0, c10::nullopt, gen);
 }
 
 // Exponential distribution
-Tensor& exponential_mps_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
+Tensor& exponential_mps_(Tensor& self, double lambda, c10::optional<Generator> gen) {
   TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
 
   mps::RandomOpBlock random_op_block = ^RandomOpFn(cachedGraph, randomTensor) {
@@ -405,7 +405,7 @@ Tensor& exponential_mps_(Tensor& self, double lambda, const std::optional<Genera
 
-Tensor& randperm_out_mps(int64_t n, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& randperm_out_mps(int64_t n, c10::optional<Generator> generator, Tensor& result) {
   if (!is_macos_13_or_newer()) {
     TORCH_WARN_ONCE("MPS: randperm op is supported natively starting from macOS 13.0. ",
                     "Falling back on CPU. This may have performance implications.");
@@ -453,7 +453,7 @@ Tensor& randperm_out_mps(int64_t n, const std::optional<Generator>& generator, T
 
 static Tensor& multinomial_with_replacement_mps_kernel(const Tensor& self,
                                                        const int64_t n_sample,
-                                                       const std::optional<Generator>& generator,
+                                                       c10::optional<Generator> generator,
                                                        Tensor& result) {
   using namespace mps;
 
@@ -581,7 +581,7 @@ constexpr int64_t FLOAT32_MAX_CONSECUTIVE_INT = 1 << (FLT_MANT_DIG);
 Tensor& multinomial_out_mps(const Tensor& self,
                             int64_t n_sample,
                             bool with_replacement,
-                            const std::optional<Generator>& gen,
+                            c10::optional<Generator> gen,
                             Tensor& result) {
   TORCH_CHECK(result.device() == self.device(), "multinomial arguments must have the same device");
   TORCH_CHECK(self.dim() > 0 && self.dim() <= 2, "prob_dist must be 1 or 2 dim");
@@ -652,10 +652,7 @@ Tensor& multinomial_out_mps(const Tensor& self,
   return result;
 }
 
-Tensor multinomial_mps(const Tensor& self,
-                       int64_t n_sample,
-                       bool with_replacement,
-                       const std::optional<Generator>& gen) {
+Tensor multinomial_mps(const Tensor& self, int64_t n_sample, bool with_replacement, c10::optional<Generator> gen) {
   Tensor result = at::empty({0}, self.options().dtype(kLong));
   multinomial_out_mps(self, n_sample, with_replacement, gen, result);
   return result;
diff --git a/aten/src/ATen/native/nested/NestedTensorMath.cpp b/aten/src/ATen/native/nested/NestedTensorMath.cpp
index 7e1666dd94fa..20c84899ab46 100644
--- a/aten/src/ATen/native/nested/NestedTensorMath.cpp
+++ b/aten/src/ATen/native/nested/NestedTensorMath.cpp
@@ -931,7 +931,7 @@ Tensor reshape_as_nested(const Tensor& self, const Tensor& other) {
   return self.reshape(sizes);
 }
 
-Tensor& normal_nested_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_nested_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   const auto& self_buf = get_nested_tensor_impl(self)->get_buffer();
   self_buf.normal_(mean, std, gen);
   return self;
diff --git a/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp b/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp
index 4ec69c72adf6..bff9842a2a3a 100644
--- a/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp
+++ b/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp
@@ -326,7 +326,7 @@ Tensor& normal_sparse_csr_(
     Tensor& self,
     double mean,
     double std,
-    const std::optional<Generator>& gen) {
+    c10::optional<Generator> gen) {
   return unary_op_inplace(self, &Tensor::normal_, mean, std, gen);
 }
 
diff --git a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp
index 0d4e5994dfe5..8f6f7a9f357d 100644
--- a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp
+++ b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp
@@ -354,7 +354,7 @@ mha_fwd(const at::Tensor &q, // batch_size x seqlen_q x num_heads x head
         int window_size_left,
         int window_size_right,
         const bool return_softmax,
-        const std::optional<Generator>& gen_) {
+        c10::optional<Generator> gen_) {
 
   auto dprops = at::cuda::getCurrentDeviceProperties();
   // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
@@ -546,7 +546,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q
         int window_size_left,
         int window_size_right,
         const bool return_softmax,
-        const std::optional<Generator>& gen_) {
+        c10::optional<Generator> gen_) {
 
   auto dprops = at::cuda::getCurrentDeviceProperties();
   // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
diff --git a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h
index 336d1b4ead9a..2745b28dca29 100644
--- a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h
+++ b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h
@@ -19,7 +19,7 @@ mha_fwd(const at::Tensor &q, // batch_size x seqlen_q x num_heads x head
         int window_size_left,
         int window_size_right,
         const bool return_softmax,
-        const std::optional<Generator>& gen_);
+        c10::optional<Generator> gen_);
 
 std::tuple
 mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
@@ -39,7 +39,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q
         int window_size_left,
         int window_size_right,
         const bool return_softmax,
-        const std::optional<Generator>& gen_);
+        c10::optional<Generator> gen_);
 
 std::tuple
diff --git a/aten/src/ATen/native/transformers/hip/flash_attn/flash_api.hip b/aten/src/ATen/native/transformers/hip/flash_attn/flash_api.hip
index 76c8b1d48677..24eebee7a75a 100644
--- a/aten/src/ATen/native/transformers/hip/flash_attn/flash_api.hip
+++ b/aten/src/ATen/native/transformers/hip/flash_attn/flash_api.hip
@@ -113,7 +113,7 @@ mha_fwd(const at::Tensor &q, // batch_size x seqlen_q x num_heads x head
         int window_size_left,
         int window_size_right,
         const bool return_softmax,
-        const std::optional<Generator>& gen_) {
+        c10::optional<Generator> gen_) {
   check_gpu_arch();
 
   auto q_dtype = q.dtype();
@@ -322,7 +322,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q
         int window_size_left,
         int window_size_right,
         const bool return_softmax,
-        const std::optional<Generator>& gen_) {
+        c10::optional<Generator> gen_) {
   TORCH_CHECK(false, "mha_varlen_fwd not supported on ROCm");
diff --git a/aten/src/ATen/native/vulkan/ops/Random.cpp b/aten/src/ATen/native/vulkan/ops/Random.cpp
index 23d95499010d..c266b1041703 100644
--- a/aten/src/ATen/native/vulkan/ops/Random.cpp
+++ b/aten/src/ATen/native/vulkan/ops/Random.cpp
@@ -16,7 +16,7 @@ Tensor& uniform_(
     Tensor& self,
     const double from,
     const double to,
-    const std::optional<Generator>& /* not implemented */) {
+    const c10::optional<Generator> /* not implemented */) {
   TORCH_CHECK(
       self.is_vulkan(),
       "Vulkan: In-place operator is only supported on Vulkan tensors.");
@@ -75,7 +75,7 @@ Tensor& normal_(
     Tensor& self,
     const double mean,
     const double std,
-    const std::optional<Generator>& /* not implemented */) {
+    const c10::optional<Generator> /* not implemented */) {
   TORCH_CHECK(
       self.is_vulkan(),
       "Vulkan: In-place operator is only supported on Vulkan tensors.");
diff --git a/aten/src/ATen/test/cpu_rng_test.cpp b/aten/src/ATen/test/cpu_rng_test.cpp
index d860e5c02a64..ebc3eee12f3f 100644
--- a/aten/src/ATen/test/cpu_rng_test.cpp
+++ b/aten/src/ATen/test/cpu_rng_test.cpp
@@ -44,89 +44,89 @@ struct TestCPUGenerator : public c10::GeneratorImpl {
 
 // ==================================================== Random ========================================================
 
-Tensor& random_(Tensor& self, const std::optional<Generator>& generator) {
+Tensor& random_(Tensor& self, c10::optional<Generator> generator) {
   return at::native::templates::random_impl(self, generator);
 }
 
-Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& generator) {
+Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> generator) {
   return at::native::templates::random_from_to_impl(self, from, to, generator);
 }
 
-Tensor& random_to(Tensor& self, int64_t to, const std::optional<Generator>& generator) {
+Tensor& random_to(Tensor& self, int64_t to, c10::optional<Generator> generator) {
   return random_from_to(self, 0, to, generator);
 }
 
 // ==================================================== Normal ========================================================
 
-Tensor& normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   return at::native::templates::normal_impl_(self, mean, std, gen);
 }
 
-Tensor& normal_Tensor_float_out(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& output) {
+Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
   return at::native::templates::normal_out_impl(output, mean, std, gen);
 }
 
-Tensor& normal_float_Tensor_out(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
+Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
   return at::native::templates::normal_out_impl(output, mean, std, gen);
 }
 
-Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
+Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
   return at::native::templates::normal_out_impl(output, mean, std, gen);
 }
 
-Tensor normal_Tensor_float(const Tensor& mean, double std, const std::optional<Generator>& gen) {
+Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional<Generator> gen) {
   return at::native::templates::normal_impl(mean, std, gen);
 }
 
-Tensor normal_float_Tensor(double mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional<Generator> gen) {
   return at::native::templates::normal_impl(mean, std, gen);
 }
 
-Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
   return at::native::templates::normal_impl(mean, std, gen);
 }
 
 // ==================================================== Uniform =======================================================
 
-Tensor& uniform_(Tensor& self, double from, double to, const std::optional<Generator>& generator) {
+Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator> generator) {
   return at::native::templates::uniform_impl_(self, from, to, generator);
 }
 
 // ==================================================== Cauchy ========================================================
 
-Tensor& cauchy_(Tensor& self, double median, double sigma, const std::optional<Generator>& generator) {
+Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generator> generator) {
   return at::native::templates::cauchy_impl_(self, median, sigma, generator);
 }
 
 // ================================================== LogNormal =======================================================
 
-Tensor& log_normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
   return at::native::templates::log_normal_impl_(self, mean, std, gen);
 }
 
 // ================================================== Geometric =======================================================
 
-Tensor& geometric_(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
   return at::native::templates::geometric_impl_(self, p, gen);
 }
 
 // ================================================== Exponential =====================================================
 
-Tensor& exponential_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
+Tensor& exponential_(Tensor& self, double lambda, c10::optional<Generator> gen) {
   return at::native::templates::exponential_impl_(self, lambda, gen);
 }
 
 // ================================================== Bernoulli =======================================================
 
-Tensor& bernoulli_Tensor(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+Tensor& bernoulli_Tensor(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
   return at::native::templates::bernoulli_impl_(self, p_, gen);
 }
 
-Tensor& bernoulli_float(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& bernoulli_float(Tensor& self, double p, c10::optional<Generator> gen) {
   return at::native::templates::bernoulli_impl_(self, p, gen);
 }
 
-Tensor& bernoulli_out(const Tensor& self, const std::optional<Generator>& gen, Tensor& result) {
+Tensor& bernoulli_out(const Tensor& self, c10::optional<Generator> gen, Tensor& result) {
   return at::native::templates::bernoulli_out_impl(result, self, gen);
 }
diff --git a/aten/src/ATen/test/cuda_distributions_test.cu b/aten/src/ATen/test/cuda_distributions_test.cu
index aab31af6b495..6f6cfcaaee55 100644
--- a/aten/src/ATen/test/cuda_distributions_test.cu
+++ b/aten/src/ATen/test/cuda_distributions_test.cu
@@ -174,7 +174,7 @@ TEST(RandomPermutationTest, TestIslandShuffle) {
   bool shuffled2 = false;
   for (int i = 0; i < 100; i++) {
     cudaDeviceSynchronize();
-    std::optional<at::Generator> gen = c10::nullopt;
+    c10::optional<at::Generator> gen = c10::nullopt;
     randperm_handle_duplicate_keys(keys, values, 8, 5, gen);
     cudaDeviceSynchronize();
     std::vector slice1 = {values[0], values[1], values[2]};
diff --git a/test/cpp_extensions/rng_extension.cpp b/test/cpp_extensions/rng_extension.cpp
index a4da80e38e50..2e657d15a397 100644
--- a/test/cpp_extensions/rng_extension.cpp
+++ b/test/cpp_extensions/rng_extension.cpp
@@ -33,15 +33,15 @@ struct TestCPUGenerator : public c10::GeneratorImpl {
   uint64_t value_;
 };
 
-Tensor& random_(Tensor& self, const std::optional<Generator>& generator) {
+Tensor& random_(Tensor& self, c10::optional<Generator> generator) {
   return at::native::templates::random_impl(self, generator);
 }
 
-Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& generator) {
+Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> generator) {
   return at::native::templates::random_from_to_impl(self, from, to, generator);
 }
 
-Tensor& random_to(Tensor& self, int64_t to, const std::optional<Generator>& generator) {
+Tensor& random_to(Tensor& self, int64_t to, c10::optional<Generator> generator) {
   return random_from_to(self, 0, to, generator);
 }
diff --git a/torch/csrc/jit/frontend/tracer.cpp b/torch/csrc/jit/frontend/tracer.cpp
index e82b2cae9f46..823b27f30fcb 100644
--- a/torch/csrc/jit/frontend/tracer.cpp
+++ b/torch/csrc/jit/frontend/tracer.cpp
@@ -678,7 +678,7 @@ void addInputs(
 void addInputs(
     Node* n,
     const char* name,
-    const std::optional<at::Generator>& value) {
+    const c10::optional<at::Generator>& value) {
   Graph* g = n->owningGraph();
 
   if (value.has_value() && value->defined()) {
diff --git a/torch/csrc/jit/frontend/tracer.h b/torch/csrc/jit/frontend/tracer.h
index 8081fac3ea28..f265d57b649d 100644
--- a/torch/csrc/jit/frontend/tracer.h
+++ b/torch/csrc/jit/frontend/tracer.h
@@ -340,7 +340,7 @@ TORCH_API void addInputs(
 TORCH_API void addInputs(
     Node* n,
     const char* name,
-    const std::optional<at::Generator>& value);
+    const c10::optional<at::Generator>& value);
 
 inline void addInputs(
     Node* n,
diff --git a/torch/csrc/jit/runtime/register_special_ops.cpp b/torch/csrc/jit/runtime/register_special_ops.cpp
index 944c696c4822..36ede6717ff9 100644
--- a/torch/csrc/jit/runtime/register_special_ops.cpp
+++ b/torch/csrc/jit/runtime/register_special_ops.cpp
@@ -406,7 +406,7 @@ RegisterOperators reg({
           double a;
           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
           double b;
-          std::optional<at::Generator> generator =
+          c10::optional<at::Generator> generator =
              pop(stack).toOptional<at::Generator>();
 
           pop(stack, tensor, a, b);
@@ -425,7 +425,7 @@ RegisterOperators reg({
           double mean;
           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
           double std;
-          std::optional<at::Generator> generator =
+          c10::optional<at::Generator> generator =
              pop(stack).toOptional<at::Generator>();
 
           pop(stack, tensor, mean, std);
diff --git a/torch/csrc/lazy/core/shape_inference.cpp b/torch/csrc/lazy/core/shape_inference.cpp
index 6c0940a69dac..c1b3424c8df4 100644
--- a/torch/csrc/lazy/core/shape_inference.cpp
+++ b/torch/csrc/lazy/core/shape_inference.cpp
@@ -177,14 +177,14 @@ std::vector<Shape> compute_shape_abs(const at::Tensor& self) {
 
 std::vector<Shape> compute_shape_bernoulli(
     const at::Tensor& self,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<Shape> compute_shape_bernoulli(
     const at::Tensor& self,
     double p,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return compute_shape_bernoulli(self, generator);
 }
 
@@ -692,14 +692,14 @@ std::vector<Shape> compute_shape_native_dropout_backward(
 
 std::vector<Shape> compute_shape_random(
     const at::Tensor& self,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<Shape> compute_shape_random(
     const at::Tensor& self,
     int64_t to,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return compute_shape_random(self, generator);
 }
 
@@ -707,7 +707,7 @@ std::vector<Shape> compute_shape_random(
     const at::Tensor& self,
     int64_t from,
     c10::optional<int64_t> to,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return compute_shape_random(self, generator);
 }
 
@@ -1372,7 +1372,7 @@ std::vector<Shape> compute_shape_normal_functional(
     const at::Tensor& self,
     double mean,
     double std,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
@@ -1380,7 +1380,7 @@ std::vector<Shape> compute_shape_uniform(
     const at::Tensor& self,
     double from,
     double to,
-    const std::optional<at::Generator>& generator) {
+    c10::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
diff --git a/torch/csrc/lazy/core/shape_inference.h b/torch/csrc/lazy/core/shape_inference.h
index e37a4ebb4d48..a8388a0b2235 100644
--- a/torch/csrc/lazy/core/shape_inference.h
+++ b/torch/csrc/lazy/core/shape_inference.h
@@ -24,8 +24,8 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d(con
 TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, const ::std::optional<at::Generator> & generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, const ::std::optional<at::Generator> & generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);
@@ -70,10 +70,10 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_nonzero(const at::Tensor & self);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, const ::std::optional<at::Generator> & generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, const ::std::optional<at::Generator> & generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, const ::std::optional<at::Generator> & generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, const ::std::optional<at::Generator> & generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, c10::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);
@@ -92,7 +92,7 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_narrow_copy_symint(const
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish(const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_selu(const at::Tensor & self);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, const ::std::optional<at::Generator> & generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
 
 // Non-Native ops
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type);
diff --git a/torch/csrc/utils/python_arg_parser.h b/torch/csrc/utils/python_arg_parser.h
index aecd78435ef2..cec99a843301 100644
--- a/torch/csrc/utils/python_arg_parser.h
+++ b/torch/csrc/utils/python_arg_parser.h
@@ -246,7 +246,7 @@ struct PythonArgs {
   inline std::vector<int64_t> intlistWithDefault(
       int i,
      std::vector<int64_t> default_intlist);
-  inline std::optional<at::Generator> generator(int i);
+  inline c10::optional<at::Generator> generator(int i);
   inline at::Storage storage(int i);
   inline at::Storage storage(
       int i,
@@ -1069,7 +1069,7 @@ inline bool PythonArgs::isNone(int i) {
   return args[i] == nullptr;
 }
 
-inline std::optional<at::Generator> PythonArgs::generator(int i) {
+inline c10::optional<at::Generator> PythonArgs::generator(int i) {
   if (!args[i])
     return c10::nullopt;
   return reinterpret_cast<THPGenerator*>(args[i])->cdata;
diff --git a/torchgen/api/cpp.py b/torchgen/api/cpp.py
index e08b8bc277c3..f5466030daa6 100644
--- a/torchgen/api/cpp.py
+++ b/torchgen/api/cpp.py
@@ -144,9 +144,6 @@ def argumenttype_type(
         remove_non_owning_ref_types=remove_non_owning_ref_types,
     )
     if r is not None:
-        if isinstance(t, OptionalType) and not mutable:
-            if str(t.elem) == "Generator":
-                return NamedCType(binds, ConstRefCType(r.type))
         return r
 
     if isinstance(t, BaseType):
diff --git a/torchgen/api/types/types.py b/torchgen/api/types/types.py
index 1a8ad3c7de77..16eff73638e4 100644
--- a/torchgen/api/types/types.py
+++ b/torchgen/api/types/types.py
@@ -12,7 +12,6 @@ if we want to generate code for another C++ library.
 Add new types to `types.py` if these types are ATen/c10 related.
 Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
 """
-
 from dataclasses import dataclass
 from typing import Dict
 
@@ -32,6 +31,7 @@ from .types_base import (
     shortT,
 )
 
+
 TENSOR_LIST_LIKE_CTYPES = [
     "at::TensorList",
     "const c10::List<c10::optional<at::Tensor>> &",
@@ -133,13 +133,9 @@ class OptionalCType(CType):
 
     def cpp_type(self, *, strip_ref: bool = False) -> str:
         # Do not pass `strip_ref` recursively.
-        if "Generator" in self.elem.cpp_type():
-            return f"::std::optional<{self.elem.cpp_type()}>"
         return f"c10::optional<{self.elem.cpp_type()}>"
 
     def cpp_type_registration_declarations(self) -> str:
-        if "Generator" in self.elem.cpp_type_registration_declarations():
-            return f"::std::optional<{self.elem.cpp_type_registration_declarations()}>"
         return f"c10::optional<{self.elem.cpp_type_registration_declarations()}>"
 
     def remove_const_ref(self) -> "CType":
diff --git a/torchgen/api/types/types_base.py b/torchgen/api/types/types_base.py
index 127b26daa85c..2f8561e49abe 100644
--- a/torchgen/api/types/types_base.py
+++ b/torchgen/api/types/types_base.py
@@ -95,13 +95,11 @@ class ConstRefCType(CType):
     elem: "CType"
 
     def cpp_type(self, *, strip_ref: bool = False) -> str:
-        if isinstance(self.elem, ConstRefCType) or strip_ref:
+        if strip_ref:
             return self.elem.cpp_type(strip_ref=strip_ref)
         return f"const {self.elem.cpp_type()} &"
 
     def cpp_type_registration_declarations(self) -> str:
-        if isinstance(self.elem, ConstRefCType):
-            return self.elem.cpp_type_registration_declarations()
         return f"const {self.elem.cpp_type_registration_declarations()} &"
 
     def remove_const_ref(self) -> "CType":
diff --git a/torchgen/dest/lazy_ir.py b/torchgen/dest/lazy_ir.py
index 8ec3f7a327d9..43cde1e04043 100644
--- a/torchgen/dest/lazy_ir.py
+++ b/torchgen/dest/lazy_ir.py
@@ -16,7 +16,6 @@ from torchgen.api.translate import translate
 from torchgen.api.types import (
     BaseCType,
     Binding,
-    ConstRefCType,
     deviceT,
     DispatcherSignature,
     kernel_signature,
@@ -246,9 +245,7 @@ class GenLazyIR(ABC):
         value_args = schema.filtered_args(values=True, scalars=False)
         scalar_args = schema.filtered_args(values=False, scalars=True)
 
-        ctor_args = [
-            f"{ConstRefCType(i.lazy_type).cpp_type()} {i.name}" for i in all_args
-        ]
+        ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
         reuse_ctor_args = ", ".join(ctor_args)
         if self.use_lazy_shape and schema.properties.ShapePrecompute:
             ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")