Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Revert "Change ATEN generator argument type to const std::optional<Generator>& (#120076)"
This reverts commit 4305c64fea154ee1ab566e19bd7568753fc30916. Reverted https://github.com/pytorch/pytorch/pull/120076 on behalf of https://github.com/izaitsevfb due to breaking internal builds (take 3) ([comment](https://github.com/pytorch/pytorch/pull/120076#issuecomment-1986338164))
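The reverted PR had moved ATen's generator arguments from pass-by-value `c10::optional<Generator>` to borrowed `const std::optional<Generator>&`; the hunks below restore the by-value form and reintroduce `std::move` at call sites. A minimal sketch of the two signature shapes, taken from the `bernoulli` hunks further down (declarations only, not a complete ATen header):

```cpp
// Shape introduced by #120076 (removed by this revert):
Tensor bernoulli(const Tensor& self, const std::optional<Generator>& gen);

// Shape restored by this revert; since the optional is now owned by value,
// call sites hand it onward with std::move(gen):
Tensor bernoulli(const Tensor& self, c10::optional<Generator> gen);
```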
.github/ci_commit_pins/xla.txt (vendored, 2 lines changed)
@@ -1 +1 @@
-8f913829abd9de749339d8d74b7357e1be3a7907
+fba464b199559f61faa720de8bf64cf955cfdce7
@@ -42,34 +42,34 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
 #define TENSOROPTIONS c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>

 // random operations (out-of-place)
-m.impl("bernoulli", unsupportedRandomOp<const Tensor&, const optional<Generator>&>);
-m.impl("bernoulli.out", unsupportedRandomOp_<const Tensor&, const optional<Generator>&, Tensor&>);
-m.impl("bernoulli.p", unsupportedRandomOp<const Tensor&, double, const optional<Generator>&>);
-m.impl("bernoulli_.Tensor", unsupportedRandomOp_<Tensor&, const Tensor&, const optional<Generator>&>);
-m.impl("bernoulli_.float", unsupportedRandomOp_<Tensor&, double, const optional<Generator>&>);
+m.impl("bernoulli", unsupportedRandomOp<const Tensor&, optional<Generator>>);
+m.impl("bernoulli.out", unsupportedRandomOp_<const Tensor&, optional<Generator>, Tensor&>);
+m.impl("bernoulli.p", unsupportedRandomOp<const Tensor&, double, optional<Generator>>);
+m.impl("bernoulli_.Tensor", unsupportedRandomOp_<Tensor&, const Tensor&, optional<Generator>>);
+m.impl("bernoulli_.float", unsupportedRandomOp_<Tensor&, double, optional<Generator>>);

-m.impl("cauchy_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
-m.impl("exponential_", unsupportedRandomOp_<Tensor&, double, const optional<Generator>&>);
-m.impl("geometric_", unsupportedRandomOp_<Tensor&, double, const optional<Generator>&>);
-m.impl("log_normal_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
-m.impl("multinomial", unsupportedRandomOp<const Tensor&, int64_t, bool, const optional<Generator>&>);
-m.impl("multinomial.out", unsupportedRandomOp_<const Tensor&, int64_t, bool, const optional<Generator>&, Tensor&>);
+m.impl("cauchy_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);
+m.impl("exponential_", unsupportedRandomOp_<Tensor&, double, optional<Generator>>);
+m.impl("geometric_", unsupportedRandomOp_<Tensor&, double, optional<Generator>>);
+m.impl("log_normal_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);
+m.impl("multinomial", unsupportedRandomOp<const Tensor&, int64_t, bool, optional<Generator>>);
+m.impl("multinomial.out", unsupportedRandomOp_<const Tensor&, int64_t, bool, optional<Generator>, Tensor&>);

-m.impl("normal.Tensor_float", unsupportedRandomOp<const Tensor&, double, const optional<Generator>&>);
-m.impl("normal.Tensor_float_out", unsupportedRandomOp_<const Tensor&, double, const optional<Generator>&, Tensor&>);
-m.impl("normal.float_Tensor_out", unsupportedRandomOp_<double, const Tensor&, const optional<Generator>&, Tensor&>);
-m.impl("normal.float_Tensor", unsupportedRandomOp<double, const Tensor&, const optional<Generator>&>);
-m.impl("normal.Tensor_Tensor", unsupportedRandomOp<const Tensor&, const Tensor&, const optional<Generator>&>);
-m.impl("normal.Tensor_Tensor_out", unsupportedRandomOp_<const Tensor&, const Tensor&, const optional<Generator>&, Tensor&>);
-m.impl("normal.float_float", unsupportedRandomOp<double, double, IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
-m.impl("normal.float_float_out", unsupportedRandomOp_<double, double, IntArrayRef, const optional<Generator>&, Tensor&>);
-m.impl("normal_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
+m.impl("normal.Tensor_float", unsupportedRandomOp<const Tensor&, double, optional<Generator>>);
+m.impl("normal.Tensor_float_out", unsupportedRandomOp_<const Tensor&, double, optional<Generator>, Tensor&>);
+m.impl("normal.float_Tensor_out", unsupportedRandomOp_<double, const Tensor&, optional<Generator>, Tensor&>);
+m.impl("normal.float_Tensor", unsupportedRandomOp<double, const Tensor&, optional<Generator>>);
+m.impl("normal.Tensor_Tensor", unsupportedRandomOp<const Tensor&, const Tensor&, optional<Generator>>);
+m.impl("normal.Tensor_Tensor_out", unsupportedRandomOp_<const Tensor&, const Tensor&, optional<Generator>, Tensor&>);
+m.impl("normal.float_float", unsupportedRandomOp<double, double, IntArrayRef, optional<Generator>, TENSOROPTIONS>);
+m.impl("normal.float_float_out", unsupportedRandomOp_<double, double, IntArrayRef, optional<Generator>, Tensor&>);
+m.impl("normal_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);

-m.impl("poisson", unsupportedRandomOp<const Tensor&, const optional<Generator>&>);
+m.impl("poisson", unsupportedRandomOp<const Tensor&, optional<Generator>>);

-m.impl("random_.from", unsupportedRandomOp_<Tensor&, int64_t, optional<int64_t>, const optional<Generator>&>);
-m.impl("random_.to", unsupportedRandomOp_<Tensor&, int64_t, const optional<Generator>&>);
-m.impl("random_", unsupportedRandomOp_<Tensor&, const optional<Generator>&>);
+m.impl("random_.from", unsupportedRandomOp_<Tensor&, int64_t, optional<int64_t>, optional<Generator>>);
+m.impl("random_.to", unsupportedRandomOp_<Tensor&, int64_t, optional<Generator>>);
+m.impl("random_", unsupportedRandomOp_<Tensor&, optional<Generator>>);

 m.impl("rand_like", unsupportedRandomOp<const Tensor&, TENSOROPTIONS, optional<MemoryFormat>>);
 m.impl("randn_like", unsupportedRandomOp<const Tensor&, TENSOROPTIONS, optional<MemoryFormat>>);
@@ -78,34 +78,34 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
 m.impl("randint_like.low_dtype", unsupportedRandomOp<const Tensor&, int64_t, int64_t, TENSOROPTIONS, optional<MemoryFormat>>);

 m.impl("rand", unsupportedRandomOp<IntArrayRef, TENSOROPTIONS>);
-m.impl("rand.generator", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+m.impl("rand.generator", unsupportedRandomOp<IntArrayRef, optional<Generator>, TENSOROPTIONS>);
 m.impl("rand.names", unsupportedRandomOp<IntArrayRef, optional<DimnameList>, TENSOROPTIONS>);
-m.impl("rand.generator_with_names", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, optional<DimnameList>, TENSOROPTIONS>);
+m.impl("rand.generator_with_names", unsupportedRandomOp<IntArrayRef, optional<Generator>, optional<DimnameList>, TENSOROPTIONS>);
 m.impl("rand.out", unsupportedRandomOp_<IntArrayRef, Tensor&>);
-m.impl("rand.generator_out", unsupportedRandomOp_<IntArrayRef, const optional<Generator>&, Tensor&>);
+m.impl("rand.generator_out", unsupportedRandomOp_<IntArrayRef, optional<Generator>, Tensor&>);

 m.impl("randn", unsupportedRandomOp<IntArrayRef, TENSOROPTIONS>);
-m.impl("randn.generator", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+m.impl("randn.generator", unsupportedRandomOp<IntArrayRef, optional<Generator>, TENSOROPTIONS>);
 m.impl("randn.names", unsupportedRandomOp<IntArrayRef, optional<DimnameList>, TENSOROPTIONS>);
-m.impl("randn.generator_with_names", unsupportedRandomOp<IntArrayRef, const optional<Generator>&, optional<DimnameList>, TENSOROPTIONS>);
+m.impl("randn.generator_with_names", unsupportedRandomOp<IntArrayRef, optional<Generator>, optional<DimnameList>, TENSOROPTIONS>);
 m.impl("randn.out", unsupportedRandomOp_<IntArrayRef, Tensor&>);
-m.impl("randn.generator_out", unsupportedRandomOp_<IntArrayRef, const optional<Generator>&, Tensor&>);
+m.impl("randn.generator_out", unsupportedRandomOp_<IntArrayRef, optional<Generator>, Tensor&>);

 m.impl("randperm", unsupportedRandomOp<int64_t, TENSOROPTIONS>);
-m.impl("randperm.generator", unsupportedRandomOp<int64_t, const optional<Generator>&, TENSOROPTIONS>);
+m.impl("randperm.generator", unsupportedRandomOp<int64_t, optional<Generator>, TENSOROPTIONS>);
 m.impl("randperm.out", unsupportedRandomOp_<int64_t, Tensor&>);
-m.impl("randperm.generator_out", unsupportedRandomOp_<int64_t, const optional<Generator>&, Tensor&>);
+m.impl("randperm.generator_out", unsupportedRandomOp_<int64_t, optional<Generator>, Tensor&>);

 m.impl("randint", unsupportedRandomOp<int64_t, IntArrayRef, TENSOROPTIONS>);
-m.impl("randint.generator", unsupportedRandomOp<int64_t, IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+m.impl("randint.generator", unsupportedRandomOp<int64_t, IntArrayRef, optional<Generator>, TENSOROPTIONS>);
 m.impl("randint.low", unsupportedRandomOp<int64_t, int64_t, IntArrayRef, TENSOROPTIONS>);
-m.impl("randint.low_generator", unsupportedRandomOp<int64_t, int64_t, IntArrayRef, const optional<Generator>&, TENSOROPTIONS>);
+m.impl("randint.low_generator", unsupportedRandomOp<int64_t, int64_t, IntArrayRef, optional<Generator>, TENSOROPTIONS>);
 m.impl("randint.out", unsupportedRandomOp_<int64_t, IntArrayRef, Tensor&>);
-m.impl("randint.generator_out", unsupportedRandomOp_<int64_t, IntArrayRef, const optional<Generator>&, Tensor&>);
+m.impl("randint.generator_out", unsupportedRandomOp_<int64_t, IntArrayRef, optional<Generator>, Tensor&>);
 m.impl("randint.low_out", unsupportedRandomOp_<int64_t, int64_t, IntArrayRef, Tensor&>);
-m.impl("randint.low_generator_out", unsupportedRandomOp_<int64_t, int64_t, IntArrayRef, const optional<Generator>&, Tensor&>);
+m.impl("randint.low_generator_out", unsupportedRandomOp_<int64_t, int64_t, IntArrayRef, optional<Generator>, Tensor&>);

-m.impl("uniform_", unsupportedRandomOp_<Tensor&, double, double, const optional<Generator>&>);
+m.impl("uniform_", unsupportedRandomOp_<Tensor&, double, double, optional<Generator>>);

 #undef TENSOROPTIONS
 }
@@ -149,7 +149,7 @@ Generator make_generator(Args&&... args) {
 * the backend generator type (CPU/CUDAGeneratorImpl etc.)
 */
 template <typename T>
-static inline T * check_generator(const std::optional<Generator>& gen) {
+static inline T * check_generator(c10::optional<Generator> gen) {
 TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
 TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
 TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
@@ -163,7 +163,7 @@ static inline T * check_generator(const std::optional<Generator>& gen) {
 * the backend generator type (CPU/CUDAGeneratorImpl etc.)
 */
 template <typename T>
-static inline T* get_generator_or_default(const std::optional<Generator>& gen, const Generator& default_gen) {
+static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) {
 return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
 }
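A standalone analogue of the `check_generator`/`get_generator_or_default` fallback rule above may help; `Gen` is a hypothetical stand-in for `at::Generator` (which in ATen is a handle around an intrusive pointer), and this is a sketch, not the ATen code:

```cpp
#include <iostream>
#include <optional>

struct Gen { bool is_defined = false; };  // stand-in for at::Generator

// Same rule as get_generator_or_default above: an empty optional *or* a
// generator whose defined() is false both fall back to the default generator.
const Gen& get_gen_or_default(const std::optional<Gen>& gen, const Gen& default_gen) {
  return gen.has_value() && gen->is_defined ? *gen : default_gen;
}

int main() {
  Gen def{true};
  std::optional<Gen> user{Gen{true}};
  std::optional<Gen> empty;
  std::cout << (&get_gen_or_default(empty, def) == &def) << '\n';   // 1: fell back
  std::cout << (&get_gen_or_default(user, def) == &*user) << '\n';  // 1: used caller's
}
```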
@@ -89,7 +89,7 @@ namespace detail {
 ts = ts | gen.key_set();
 }
 }
-void operator()(const std::optional<at::Generator>& gen) {
+void operator()(const c10::optional<at::Generator>& gen) {
 if (gen.has_value() && gen->defined()) {
 ts = ts | gen->key_set();
 }
@@ -303,8 +303,8 @@ static std::tuple<Tensor, optional<int64_t>> log_sigmoid_backward_batch_rule(
 return std::make_tuple(at::log_sigmoid_backward(out_grad, out_self, out_buffer), 0);
 }

-static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, const std::optional<Generator>& gen) {
-return at::binomial(count, prob.contiguous(), gen); // Bug in PyTorch, prob shouldn't need to be contiguous
+static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen) {
+return at::binomial(count, prob.contiguous(), std::move(gen)); // Bug in PyTorch, prob shouldn't need to be contiguous
 }

 TORCH_LIBRARY_IMPL(aten, FuncTorchVmapMode, m) {
@@ -58,7 +58,7 @@ Tensor& random_inplace_batching_rule(Tensor& self, ExtraArgs... extra_args) {
 }
 }

-static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
 auto maybe_layer = maybeCurrentDynamicLayer();
 auto cur_level = maybe_layer->layerId();
@@ -94,11 +94,11 @@ static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor
 "If this is necessary for your usage, please file an issue with functorch.");
 if (randomness == RandomnessType::Same && self_bdim) {
 auto intermediate = empty(self.sizes(), self.options());
-intermediate.bernoulli_(other_, gen);
+intermediate.bernoulli_(other_, std::move(gen));
 self.copy_(intermediate); // batching should make this just work out...
 return self;
 } else {
-self_.bernoulli_(other_, gen);
+self_.bernoulli_(other_, std::move(gen));
 return self;
 }
 }
@@ -213,7 +213,7 @@ static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tens
 return std::make_tuple(output, mask);
 }

-static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const std::optional<Generator>& generator) {
+static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const c10::optional<Generator> generator) {
 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
 auto maybe_layer = maybeCurrentDynamicLayer();
 const auto cur_level = maybe_layer->layerId();
@@ -573,7 +573,7 @@ inline void _rrelu_with_noise_train(
 const Tensor& noise,
 const Scalar& lower_,
 const Scalar& upper_,
-const std::optional<Generator>& generator) {
+c10::optional<Generator> generator) {
 using opmath_t = at::opmath_type<scalar_t>;
 opmath_t lower = lower_.to<opmath_t>();
 opmath_t upper = upper_.to<opmath_t>();
@@ -604,7 +604,7 @@ Tensor& rrelu_with_noise_out_cpu(const Tensor& self,
 const Scalar& lower,
 const Scalar& upper,
 bool training,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 Tensor& output) {
 if (training) {
 AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::BFloat16, self.scalar_type(), "rrelu_with_noise_out_cpu", [&] {
@@ -626,10 +626,10 @@ Tensor rrelu_with_noise_cpu(
 const Scalar& lower,
 const Scalar& upper,
 bool training,
-const std::optional<Generator>& generator) {
+c10::optional<Generator> generator) {
 auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
 return at::native::rrelu_with_noise_out_cpu(
-self, noise, lower, upper, training, generator, output);
+self, noise, lower, upper, training, std::move(generator), output);
 }

 Tensor& rrelu_with_noise_cpu_(
@@ -638,9 +638,9 @@ Tensor& rrelu_with_noise_cpu_(
 const Scalar& lower,
 const Scalar& upper,
 bool training,
-const std::optional<Generator>& generator) {
+c10::optional<Generator> generator) {
 return at::native::rrelu_with_noise_out_cpu(
-self, noise, lower, upper, training, generator, self);
+self, noise, lower, upper, training, std::move(generator), self);
 }

 Tensor rrelu_with_noise_backward(
@@ -661,14 +661,14 @@ Tensor rrelu_with_noise_backward(
 }
 }

-Tensor rrelu(const Tensor & self, const Scalar& lower, const Scalar& upper, bool training, const std::optional<Generator>& generator) {
+Tensor rrelu(const Tensor & self, const Scalar& lower, const Scalar& upper, bool training, c10::optional<Generator> generator) {
 TORCH_CHECK(lower.to<double>() <= upper.to<double>(), "Lower bound should be less than or equal to the upper bound")
-return at::rrelu_with_noise(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, generator);
+return at::rrelu_with_noise(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, std::move(generator));
 }

-Tensor & rrelu_(Tensor & self, const Scalar& lower, const Scalar& upper, bool training, const std::optional<Generator>& generator) {
+Tensor & rrelu_(Tensor & self, const Scalar& lower, const Scalar& upper, bool training, c10::optional<Generator> generator) {
 TORCH_CHECK(lower.to<double>() <= upper.to<double>(), "Lower bound should be less than or equal to the upper bound")
-return at::rrelu_with_noise_(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, generator);
+return at::rrelu_with_noise_(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, std::move(generator));
 }

 TORCH_IMPL_FUNC(threshold_out)(const Tensor& self, const Scalar& threshold, const Scalar& value, const Tensor& result) {
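For context on the rrelu hunks: in training mode RReLU scales each negative element by a random slope drawn uniformly from [lower, upper], with the `noise` tensor recording the drawn slopes (this is the standard RReLU definition; the ATen kernel above is the real implementation). A scalar sketch:

```cpp
#include <random>

// Per-element training-mode RReLU rule (standard definition, illustrative only).
double rrelu_train_scalar(double x, double lower, double upper, std::mt19937& rng) {
  if (x >= 0) return x;  // non-negative inputs pass through unchanged
  std::uniform_real_distribution<double> slope(lower, upper);
  return x * slope(rng);  // negative side: x * a, with a ~ U(lower, upper)
}
```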
@@ -81,7 +81,7 @@ int64_t update_to(int64_t to) {
 }

 template<template<typename> class random_kernel, typename RNG>
-at::Tensor& random_impl(at::Tensor& self, const std::optional<Generator>& generator) {
+at::Tensor& random_impl(at::Tensor& self, c10::optional<Generator> generator) {
 CHECK_EMPTY_AND_RETURN(self);
 auto iter = at::TensorIterator::borrowing_nullary_op(self);
 random_kernel<RNG>()(iter, generator);
@@ -132,7 +132,7 @@ static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMet
 }

 template<template<typename> class random_from_to_kernel, typename RNG>
-at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, const std::optional<Generator>& generator) {
+at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> generator) {
 uint64_t range = 0;
 auto iter = at::TensorIterator::borrowing_nullary_op(self);
 if (to_opt.has_value()) {
@@ -200,7 +200,7 @@ at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<in
 TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);

 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_impl_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
 CHECK_NORMAL_STD(std);
 CHECK_EMPTY_AND_RETURN(self);
@@ -216,7 +216,7 @@ Tensor& normal_impl_(Tensor& self, double mean, double std, const std::optional<
 }

 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, const std::optional<Generator>& gen) {
+Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional<Generator> gen) {
 CHECK_NORMAL_STD(std);
 auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
 auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
@@ -227,7 +227,7 @@ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, const st
 }

 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
 CHECK_NORMAL_TENSOR_STD(std);
 auto mean_tensor = at::full({}, mean, output.options());
 auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
@@ -242,7 +242,7 @@ Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, const st
 }

 template<template<typename> class normal_kernel, typename RNG>
-Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
 CHECK_NORMAL_TENSOR_STD(std);
 auto shape = at::infer_size(mean.sizes(), std.sizes());
 at::native::resize_output(output, shape);
@@ -256,7 +256,7 @@ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c
 }

 template<template<typename> class normal_kernel, typename RNG>
-Tensor normal_impl(const Tensor& mean, double std, const std::optional<Generator>& gen) {
+Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
 CHECK_NORMAL_STD(std);
 Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
 normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
@@ -264,7 +264,7 @@ Tensor normal_impl(const Tensor& mean, double std, const std::optional<Generator
 }

 template<template<typename> class normal_kernel, typename RNG>
-Tensor normal_impl(double mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
 CHECK_NORMAL_TENSOR_STD(std);
 Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
 normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
@@ -272,7 +272,7 @@ Tensor normal_impl(double mean, const Tensor& std, const std::optional<Generator
 }

 template<template<typename> class normal_kernel, typename RNG>
-Tensor normal_impl(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
+Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
 CHECK_NORMAL_TENSOR_STD(std);
 auto shape = at::infer_size(mean.sizes(), std.sizes());
 Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
@@ -283,7 +283,7 @@ Tensor normal_impl(const Tensor& mean, const Tensor& std, const std::optional<Ge
 // ==================================================== Uniform =======================================================

 template<template<typename> class uniform_kernel, typename RNG>
-at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, const std::optional<Generator>& generator) {
+at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
 if (self.is_complex()) {
 CHECK_EMPTY_AND_RETURN(self);
 auto float_tensor = at::view_as_real(self);
@@ -313,7 +313,7 @@ at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, const std::o
 // ================================================== LogNormal =======================================================

 template<template<typename> class log_normal_kernel, typename RNG>
-at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
 TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
 CHECK_EMPTY_AND_RETURN(self);
 auto iter = TensorIterator::borrowing_nullary_op(self);
@@ -324,7 +324,7 @@ at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, const st
 // =================================================== Geometric ======================================================

 template<template<typename> class geometric_kernel, typename RNG>
-Tensor& geometric_impl_(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
 TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
 CHECK_EMPTY_AND_RETURN(self);
 auto iter = TensorIterator::borrowing_nullary_op(self);
@@ -335,7 +335,7 @@ Tensor& geometric_impl_(Tensor& self, double p, const std::optional<Generator>&
 // ================================================== Exponential =====================================================

 template<template<typename> class exponential_kernel, typename RNG>
-Tensor& exponential_impl_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
+Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
 TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
 CHECK_EMPTY_AND_RETURN(self);
 auto iter = TensorIterator::borrowing_nullary_op(self);
@@ -346,7 +346,7 @@ Tensor& exponential_impl_(Tensor& self, double lambda, const std::optional<Gener
 // ==================================================== Cauchy ========================================================

 template<template<typename> class cauchy_kernel, typename RNG>
-Tensor& cauchy_impl_(Tensor& self, double median, double sigma, const std::optional<Generator>& gen) {
+Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
 // TODO: instead of variable name 'sigma', use 'gamma' or 'scale'
 // the variance, squared sigma, is undefined for cauchy distribution
 TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
@@ -360,7 +360,7 @@ Tensor& cauchy_impl_(Tensor& self, double median, double sigma, const std::optio
 // ==================================================== Bernoulli =====================================================

 template<template<typename> class bernoulli_tensor_kernel, typename RNG>
-Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
 CHECK_EMPTY_AND_RETURN(self);
 NoNamesGuard guard;
 at::assert_no_internal_overlap(self);
@@ -369,7 +369,7 @@ Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, const std::optional<Gene
 }

 template<template<typename> class bernoulli_scalar_kernel, typename RNG>
-Tensor& bernoulli_impl_(Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
 TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
 CHECK_EMPTY_AND_RETURN(self);
 at::assert_no_internal_overlap(self);
@@ -378,7 +378,7 @@ Tensor& bernoulli_impl_(Tensor& self, double p, const std::optional<Generator>&
 }

 template<template<typename> class bernoulli_tensor_kernel, typename RNG>
-Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, const std::optional<Generator>& gen) {
+Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
 // result.resize_as_(self) requires self to have same dtype as result, so we
 // use resize_ instead.
 // TODO: Fix resize_as_. See pytorch/pytorch#11665.
@@ -160,96 +160,96 @@ DEFINE_DISPATCH(random_full_64_bits_range_stub);

 template<typename RNG>
 struct BernoulliStub {
-void operator()(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
+void operator()(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
 bernoulli_tensor_stub(self.device().type(), self, p_, gen);
 }

-void operator()(Tensor& self, double p, const std::optional<Generator>& gen) {
+void operator()(Tensor& self, double p, c10::optional<Generator> gen) {
 bernoulli_scalar_stub(self.device().type(), self, p, gen);
 }
 };

-Tensor bernoulli(const Tensor& self, const std::optional<Generator>& gen) {
+Tensor bernoulli(const Tensor& self, c10::optional<Generator> gen) {
 Tensor result = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
-result.bernoulli_(self, gen);
+result.bernoulli_(self, std::move(gen));
 return result;
 }

-Tensor bernoulli(const Tensor& self, double p, const std::optional<Generator>& gen) {
+Tensor bernoulli(const Tensor& self, double p, c10::optional<Generator> gen) {
 Tensor result = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
-result.bernoulli_(p, gen);
+result.bernoulli_(p, std::move(gen));
 return result;
 }

-Tensor& bernoulli_out(const Tensor& self, const std::optional<Generator>& gen, Tensor& result) {
-return at::native::templates::bernoulli_out_impl<BernoulliStub, Generator>(result, self, gen);
+Tensor& bernoulli_out(const Tensor& self, c10::optional<Generator> gen, Tensor& result) {
+return at::native::templates::bernoulli_out_impl<BernoulliStub, Generator>(result, self, std::move(gen));
 }

-Tensor& bernoulli_(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
-return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p_, gen);
+Tensor& bernoulli_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
+return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p_, std::move(gen));
 }

-Tensor& bernoulli_(Tensor& self, double p, const std::optional<Generator>& gen) {
-return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p, gen);
+Tensor& bernoulli_(Tensor& self, double p, c10::optional<Generator> gen) {
+return at::native::templates::bernoulli_impl_<BernoulliStub, Generator>(self, p, std::move(gen));
 }

 // ================================================== LogNormal =======================================================

 template<typename RNG>
 struct LogNormalStub {
-void operator()(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
 log_normal_stub(iter.device_type(), iter, mean, std, gen);
 }
 };

-Tensor& log_normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
-return at::native::templates::log_normal_impl_<LogNormalStub, Generator>(self, mean, std, gen);
+Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+return at::native::templates::log_normal_impl_<LogNormalStub, Generator>(self, mean, std, std::move(gen));
 }

 // ==================================================== Cauchy ========================================================

 template<typename RNG>
 struct CauchyStub {
-void operator()(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
 cauchy_stub(iter.device_type(), iter, median, sigma, gen);
 }
 };

-Tensor& cauchy_(Tensor& self, double median, double sigma, const std::optional<Generator>& gen) {
-return at::native::templates::cauchy_impl_<CauchyStub, Generator>(self, median, sigma, gen);
+Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
+return at::native::templates::cauchy_impl_<CauchyStub, Generator>(self, median, sigma, std::move(gen));
 }

 // ================================================== Exponential =====================================================

 template<typename RNG>
 struct ExponentialStub {
-void operator()(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
 exponential_stub(iter.device_type(), iter, lambda, gen);
 }
 };

-Tensor& exponential_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
-return at::native::templates::exponential_impl_<ExponentialStub, Generator>(self, lambda, gen);
+Tensor& exponential_(Tensor& self, double lambda, c10::optional<Generator> gen) {
+return at::native::templates::exponential_impl_<ExponentialStub, Generator>(self, lambda, std::move(gen));
 }

 // =================================================== Geometric ======================================================

 template<typename RNG>
 struct GeometricStub {
-void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
 geometric_stub(iter.device_type(), iter, p, gen);
 }
 };

-Tensor& geometric_(Tensor& self, double p, const std::optional<Generator>& gen) {
-return at::native::templates::geometric_impl_<GeometricStub, Generator>(self, p, gen);
+Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
+return at::native::templates::geometric_impl_<GeometricStub, Generator>(self, p, std::move(gen));
 }

 // ==================================================== Uniform =======================================================

 template<typename RNG>
 struct UniformStub {
-void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
 uniform_stub(iter.device_type(), iter, from, to, gen);
 }
 };
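The hunks above all follow one pattern: a stateless `*Stub` functor supplies the device-specific stub call, and a shared `templates::*_impl_` owns the argument checks before dispatching. A standalone sketch of that shape (simplified; the real templates take a `template<typename> class` kernel parameter plus an RNG type, and the checks are `TORCH_CHECK`s):

```cpp
#include <iostream>

struct PrintStub {  // plays the role of BernoulliStub, CauchyStub, etc.
  void operator()(double p) { std::cout << "kernel(p=" << p << ")\n"; }
};

// Plays the role of templates::bernoulli_impl_: validate first, then dispatch.
template <class kernel_stub>
void bernoulli_like_impl(double p) {
  if (p < 0.0 || p > 1.0) return;  // stands in for TORCH_CHECK(0 <= p && p <= 1, ...)
  kernel_stub()(p);                // forward to the device-specific stub
}

int main() { bernoulli_like_impl<PrintStub>(0.5); }
```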
@@ -257,23 +257,23 @@ struct UniformStub {
 template<typename RNG>
 struct UniformMeta {
 // No-op!
-void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
 }
 };

-Tensor& uniform_(Tensor& self, double from, double to, const std::optional<Generator>& gen) {
-return at::native::templates::uniform_impl_<UniformStub, Generator>(self, from, to, gen);
+Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
+return at::native::templates::uniform_impl_<UniformStub, Generator>(self, from, to, std::move(gen));
 }

-Tensor& uniform_meta_(Tensor& self, double from, double to, const std::optional<Generator>& gen) {
-return at::native::templates::uniform_impl_<UniformMeta, Generator>(self, from, to, gen);
+Tensor& uniform_meta_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
+return at::native::templates::uniform_impl_<UniformMeta, Generator>(self, from, to, std::move(gen));
 }

 // ==================================================== Normal ========================================================

 template<typename RNG>
 struct NormalStub {
-void operator()(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
 normal_stub(self.device().type(), self, mean, std, gen);
 }
 };
@@ -281,76 +281,76 @@ struct NormalStub {
 template<typename RNG>
 struct NormalMeta {
 // No-op!
-void operator()(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
+void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
 }
 };

 // inplace
-Tensor& normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl_<NormalStub, Generator>(self, mean, std, gen);
+Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl_<NormalStub, Generator>(self, mean, std, std::move(gen));
 }

-Tensor& normal_meta_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl_<NormalMeta, Generator>(self, mean, std, gen);
+Tensor& normal_meta_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl_<NormalMeta, Generator>(self, mean, std, std::move(gen));
 }

 // out tensor float
-Tensor& normal_out(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& output) {
-return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, gen);
+Tensor& normal_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
+return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, std::move(gen));
 }

-Tensor& normal_out_meta(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& output) {
-return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, gen);
+Tensor& normal_out_meta(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
+return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, std::move(gen));
 }

 // out float tensor
-Tensor& normal_out(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, gen);
+Tensor& normal_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, std::move(gen));
 }

-Tensor& normal_out_meta(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, gen);
+Tensor& normal_out_meta(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, std::move(gen));

 }

 // out tensor tensor
-Tensor& normal_out(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, gen);
+Tensor& normal_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+return at::native::templates::normal_out_impl<NormalStub, Generator>(output, mean, std, std::move(gen));
 }

-Tensor& normal_out_meta(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
-return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, gen);
+Tensor& normal_out_meta(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
+return at::native::templates::normal_out_impl<NormalMeta, Generator>(output, mean, std, std::move(gen));
 }

 // functional tensor float
-Tensor normal(const Tensor& mean, double std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, gen);
+Tensor normal(const Tensor& mean, double std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, std::move(gen));
 }

-Tensor normal_meta(const Tensor& mean, double std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, gen);
+Tensor normal_meta(const Tensor& mean, double std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, std::move(gen));
 }

 // functional float tensor
-Tensor normal(double mean, const Tensor& std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, gen);
+Tensor normal(double mean, const Tensor& std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, std::move(gen));
 }

-Tensor normal_meta(double mean, const Tensor& std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, gen);
+Tensor normal_meta(double mean, const Tensor& std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, std::move(gen));
 }

 // functional tensor tensor
-Tensor normal(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, gen);
+Tensor normal(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl<NormalStub, Generator>(mean, std, std::move(gen));
 }

-Tensor normal_meta(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
-return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, gen);
+Tensor normal_meta(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+return at::native::templates::normal_impl<NormalMeta, Generator>(mean, std, std::move(gen));
 }

 // functional variant, only used by the functionalization pass.
-Tensor normal_functional(const Tensor& self, double mean, double std, const std::optional<at::Generator>& generator) {
+Tensor normal_functional(const Tensor& self, double mean, double std, c10::optional<at::Generator> generator) {
 return self.clone().normal_(mean, std, std::move(generator));
 }
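Why the restored by-value signatures pair with `std::move`: `at::Generator` is a handle around an intrusive pointer, so moving the by-value optional transfers the reference along the call chain instead of bumping a refcount at each hop. A self-contained sketch, with `shared_ptr` standing in for the intrusive pointer (names are illustrative):

```cpp
#include <memory>
#include <optional>
#include <utility>

struct FakeGen { std::shared_ptr<int> impl = std::make_shared<int>(0); };  // stand-in

void consume(std::optional<FakeGen> gen) { /* final user of the generator */ }

void forward(std::optional<FakeGen> gen) {
  consume(std::move(gen));  // hands the reference along; no extra refcount bump
}

int main() { forward(FakeGen{}); }
```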
@@ -358,44 +358,44 @@ Tensor normal_functional(const Tensor& self, double mean, double std, const std:

 template<typename RNG>
 struct RandomStub {
-void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
 random_stub(iter.device_type(), iter, gen);
 }
 };

-Tensor& random_(Tensor& self, const std::optional<Generator>& gen) {
-return at::native::templates::random_impl<RandomStub, Generator>(self, gen);
+Tensor& random_(Tensor& self, c10::optional<Generator> gen) {
+return at::native::templates::random_impl<RandomStub, Generator>(self, std::move(gen));
 }

 template<typename RNG>
 struct RandomFromToStub {
-void operator()(TensorIteratorBase& iter, uint64_t range, int64_t from, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, uint64_t range, int64_t from, c10::optional<Generator> gen) {
 random_from_to_stub(iter.device_type(), iter, range, from, gen);
 }
-void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
+void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
 random_full_64_bits_range_stub(iter.device_type(), iter, gen);
 }
 };

-Tensor& random_(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& gen) {
-return at::native::templates::random_from_to_impl<RandomFromToStub, Generator>(self, from, to, gen);
+Tensor& random_(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> gen) {
+return at::native::templates::random_from_to_impl<RandomFromToStub, Generator>(self, from, to, std::move(gen));
 }

-Tensor& random_(Tensor& self, int64_t to, const std::optional<Generator>& gen) {
-return random_(self, 0, to, gen);
+Tensor& random_(Tensor& self, int64_t to, c10::optional<Generator> gen) {
+return random_(self, 0, to, std::move(gen));
 }

-Tensor& random_meta_(Tensor& self, const std::optional<Generator>& gen) {
+Tensor& random_meta_(Tensor& self, c10::optional<Generator> gen) {
 // No error checking yay
 return self;
 }

-Tensor& random_meta_(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& gen) {
+Tensor& random_meta_(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> gen) {
 // No error checking yay
 return self;
 }

-Tensor& random_meta_(Tensor& self, int64_t to, const std::optional<Generator>& gen) {
+Tensor& random_meta_(Tensor& self, int64_t to, c10::optional<Generator> gen) {
 // No error checking yay
 return self;
 }
@@ -437,7 +437,7 @@ Tensor _dirichlet_grad_cpu(const Tensor& x, const Tensor& alpha, const Tensor& t
 * This section is a counterpart to Distributions.cu
 */

-Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, const std::optional<Generator>& gen) {
+Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen) {
 Tensor ret = at::zeros(count.sizes(), count.options());
 auto iter = TensorIteratorConfig()
 .add_output(ret)
@@ -462,7 +462,7 @@ Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, const std::optio
 return ret;
 }

-Tensor _s_poisson_cpu(const Tensor& lambda, const std::optional<Generator>& gen) {
+Tensor _s_poisson_cpu(const Tensor& lambda, c10::optional<Generator> gen) {
 Tensor ret = at::zeros(lambda.sizes(), lambda.options());
 auto iter = TensorIteratorConfig()
 .add_output(ret)
@@ -479,7 +479,7 @@ Tensor _s_poisson_cpu(const Tensor& lambda, const std::optional<Generator>& gen)
 return ret;
 }

-Tensor _s_gamma_cpu(const Tensor& alpha, const std::optional<Generator>& gen) {
+Tensor _s_gamma_cpu(const Tensor& alpha, c10::optional<Generator> gen) {
 Tensor ret = at::zeros(alpha.sizes(), alpha.options());
 auto iter = TensorIteratorConfig()
 .add_output(ret)
@@ -509,7 +509,7 @@ Tensor _s_gamma_cpu(const Tensor& alpha, const std::optional<Generator>& gen) {
 return ret;
 }

-Tensor _s_dirichlet_cpu(const Tensor& alpha, const std::optional<Generator>& gen) {
+Tensor _s_dirichlet_cpu(const Tensor& alpha, c10::optional<Generator> gen) {
 Tensor ret = at::zeros(alpha.sizes(), alpha.options());
 AT_DISPATCH_FLOATING_TYPES(ret.scalar_type(), "dirichlet", [&] {
 Tensor gamma = at::zeros(alpha.sizes(), alpha.options().dtype(ScalarType::Double));
@@ -562,7 +562,7 @@ constexpr int64_t FLOAT32_MAX_CONSECUTIVE_INT = 1 << (FLT_MANT_DIG);
 Tensor& multinomial_out(const Tensor& self,
 int64_t n_sample,
 bool with_replacement,
-const std::optional<Generator>& gen,
+c10::optional<Generator> gen,
 Tensor& result) {
 TORCH_CHECK(
 result.device() == self.device(),
@@ -622,7 +622,7 @@ Tensor& multinomial_out(const Tensor& self,
 // s = argmax( p / (-log(eps)) ) where eps ~ U(0, 1).
 // We can also simplify the formula above by
 // s = argmax( p / q ) where q ~ Exp(1)
-Tensor q = at::empty_like(self).exponential_(1, gen);
+Tensor q = at::empty_like(self).exponential_(1, std::move(gen));
 // In theory the probability to generate 0 from exponential distribution is
 // 0. However, on CUDA side there is a protection to avoid 0s, but on CPU
 // side, there is a very low probability to generate 0 from
@@ -647,9 +647,9 @@ Tensor multinomial(
 const Tensor& self,
 int64_t n_sample,
 bool with_replacement,
-const std::optional<Generator>& gen) {
+c10::optional<Generator> gen) {
 Tensor result = at::empty({0}, self.options().dtype(kLong));
-native::multinomial_out(self, n_sample, with_replacement, gen, result);
+native::multinomial_out(self, n_sample, with_replacement, std::move(gen), result);
 return result;
 }
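The `argmax(p / q)` comment in the multinomial hunk is the exponential-race identity: `q[i] / p[i] ~ Exp(p[i])`, and the minimum of independent exponentials lands on index `i` with probability proportional to its rate, so the argmax yields a categorical sample proportional to `p`. A standalone sketch of that identity (not the ATen kernel):

```cpp
#include <cstddef>
#include <random>
#include <vector>

// Draws index i with probability p[i] / sum(p) via argmax(p[i] / q[i]), q ~ Exp(1).
std::size_t sample_categorical(const std::vector<double>& p, std::mt19937& rng) {
  std::exponential_distribution<double> exp1(1.0);
  std::size_t best = 0;
  double best_ratio = -1.0;
  for (std::size_t i = 0; i < p.size(); ++i) {
    // Note: exp1 can return 0 (giving an inf ratio); the comments in the hunk
    // above discuss exactly this rare-zero caveat and how ATen protects against it.
    const double ratio = p[i] / exp1(rng);
    if (ratio > best_ratio) { best_ratio = ratio; best = i; }
  }
  return best;
}
```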
@@ -878,10 +878,10 @@ Tensor rand(IntArrayRef size,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
 c10::optional<bool> pin_memory) {
-return native::rand(size, static_cast<const std::optional<Generator>&>(c10::nullopt), dtype, layout, device, pin_memory);
+return native::rand(size, static_cast<c10::optional<Generator>>(c10::nullopt), dtype, layout, device, pin_memory);
 }

-Tensor rand(IntArrayRef size, const std::optional<Generator>& generator,
+Tensor rand(IntArrayRef size, c10::optional<Generator> generator,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
@@ -897,7 +897,7 @@ Tensor& rand_out(IntArrayRef size, Tensor& result) {
 return native::rand_out(size, c10::nullopt, result);
 }

-Tensor& rand_out(IntArrayRef size, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& rand_out(IntArrayRef size, c10::optional<Generator> generator, Tensor& result) {
 result.resize_(size);
 return result.uniform_(0, 1, std::move(generator));
 }
@@ -929,7 +929,7 @@ Tensor randint(int64_t high, IntArrayRef size,
 Tensor randint(
 int64_t high,
 IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
@@ -952,7 +952,7 @@ Tensor randint(
 int64_t low,
 int64_t high,
 IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
@@ -970,7 +970,7 @@ Tensor& randint_out(int64_t high, IntArrayRef size, Tensor& result) {

 Tensor& randint_out(int64_t high,
 IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 Tensor& result) {
 result.resize_(size);
 return result.random_(0, high, std::move(generator));
@@ -983,7 +983,7 @@ Tensor& randint_out(int64_t low, int64_t high, IntArrayRef size, Tensor& result)
 Tensor& randint_out(int64_t low,
 int64_t high,
 IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 Tensor& result) {
 result.resize_(size);
 return result.random_(low, high, std::move(generator));
@@ -1027,10 +1027,10 @@ Tensor randn(IntArrayRef size,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
 c10::optional<bool> pin_memory) {
-return native::randn(size, static_cast<const std::optional<Generator>&>(c10::nullopt), dtype, layout, device, pin_memory);
+return native::randn(size, static_cast<c10::optional<Generator>>(c10::nullopt), dtype, layout, device, pin_memory);
 }

-Tensor randn(IntArrayRef size, const std::optional<Generator>& generator,
+Tensor randn(IntArrayRef size, c10::optional<Generator> generator,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
@@ -1046,13 +1046,13 @@ Tensor& randn_out(IntArrayRef size, Tensor& result) {
 return native::randn_out(size, c10::nullopt, result);
 }

-Tensor& randn_out(IntArrayRef size, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& randn_out(IntArrayRef size, c10::optional<Generator> generator, Tensor& result) {
 result.resize_(size);
 return result.normal_(0, 1, std::move(generator));
 }

 Tensor normal(double mean, double std, IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
@@ -1065,7 +1065,7 @@ Tensor normal(double mean, double std, IntArrayRef size,
 }

 Tensor& normal_out(double mean, double std,
-IntArrayRef size, const std::optional<Generator>& generator, Tensor& result) {
+IntArrayRef size, c10::optional<Generator> generator, Tensor& result) {
 result.resize_(size);
 return result.normal_(mean, std, std::move(generator));
 }
@@ -1120,7 +1120,7 @@ Tensor randperm(int64_t n,
 return native::randperm(n, c10::nullopt, dtype, layout, device, pin_memory);
 }

-Tensor randperm(int64_t n, const std::optional<Generator>& generator,
+Tensor randperm(int64_t n, c10::optional<Generator> generator,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
 c10::optional<Device> device,
@@ -1140,7 +1140,7 @@ Tensor& randperm_out(int64_t n, Tensor& result) {
 return at::randperm_out(result, n, c10::nullopt);
 }

-Tensor& randperm_out_cpu(int64_t n, const std::optional<Generator>& generator, Tensor& result) {
+Tensor& randperm_out_cpu(int64_t n, c10::optional<Generator> generator, Tensor& result) {
 TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
 TORCH_CHECK(!generator.has_value() || (generator.has_value() && result.device() == generator->device()), "Expected a '", result.device(), "' generator device but found '", generator->device(), "'");
 check_supported_max_int_with_precision(n, result);
@@ -1809,7 +1809,7 @@ Tensor randn(

 Tensor randn(
 IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 optional<DimnameList> names,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
@@ -1834,7 +1834,7 @@ Tensor rand(

 Tensor rand(
 IntArrayRef size,
-const std::optional<Generator>& generator,
+c10::optional<Generator> generator,
 optional<DimnameList> names,
 c10::optional<ScalarType> dtype,
 c10::optional<Layout> layout,
@@ -93,23 +93,23 @@ DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k1_stub);
 DECLARE_DISPATCH(unary_fn, special_spherical_bessel_j0_stub);

 // NB: these are actually defined in Distribution
-DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, const std::optional<Generator>&), bernoulli_tensor_stub);
-DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const std::optional<Generator>&), bernoulli_scalar_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, const std::optional<Generator>&), cauchy_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const std::optional<Generator>&), exponential_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const std::optional<Generator>&), geometric_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, const std::optional<Generator>&), log_normal_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, const std::optional<Generator>&), uniform_stub);
-DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, const std::optional<Generator>&), normal_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, const std::optional<Generator>&), random_from_to_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const std::optional<Generator>&), random_full_64_bits_range_stub);
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const std::optional<Generator>&), random_stub);
+DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, c10::optional<Generator>), bernoulli_tensor_stub);
+DECLARE_DISPATCH(void(*)(const TensorBase&, const double, c10::optional<Generator>), bernoulli_scalar_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), cauchy_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional<Generator>), exponential_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional<Generator>), geometric_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), log_normal_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), uniform_stub);
+DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, c10::optional<Generator>), normal_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, c10::optional<Generator>), random_from_to_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional<Generator>), random_full_64_bits_range_stub);
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional<Generator>), random_stub);

 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t, const double), kaiser_window_stub);
 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t), polygamma_stub);
 DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const Scalar& a, const Scalar& b), clamp_stub);
 DECLARE_DISPATCH(
-void (*)(Tensor&, const Tensor&, int64_t, const std::optional<Generator>&),
+void (*)(Tensor&, const Tensor&, int64_t, c10::optional<Generator>),
 multinomial_with_replacement_stub);
 DECLARE_DISPATCH(
 void (*)(
@ -26,27 +26,27 @@
namespace at::native {
namespace {

static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::cauchy_kernel(iter, median, sigma, generator);
}

void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen) {
void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::bernoulli_kernel(self, p_, generator);
}

#if !AT_MKL_ENABLED()
void bernoulli_scalar_kernel_default(const TensorBase &self, double p, const std::optional<Generator>& gen) {
void bernoulli_scalar_kernel_default(const TensorBase &self, double p, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::bernoulli_kernel(self, p, generator);
}

void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::optional<Generator>& gen) {
void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
bernoulli_scalar_kernel_default(self, p, gen);
}
#else
void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::optional<Generator>& gen) {
void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
int64_t seed;
{
@ -99,17 +99,17 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::option
}
#endif

static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::exponential_kernel(iter, lambda, generator);
}

#if (!AT_MKL_ENABLED() || defined(FBCODE_CAFFE2))
void exponential_kernel(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
exponential_kernel_default(iter, lambda, gen);
}
#else
void exponential_kernel(TensorIteratorBase &iter, double lambda, const std::optional<Generator>& gen) {
void exponential_kernel(TensorIteratorBase &iter, double lambda, c10::optional<Generator> gen) {
TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());

Tensor self = iter.tensor(0);
@ -195,32 +195,32 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, const std::opti
}
#endif

static void geometric_kernel(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
static void geometric_kernel(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::geometric_kernel(iter, p, generator);
}

static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::log_normal_kernel(iter, mean, std, generator);
}

void uniform_kernel(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::uniform_kernel(iter, from, to, generator);
}

void normal_kernel(const TensorBase &self, double mean, double std, const std::optional<Generator>& gen) {
void normal_kernel(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::normal_kernel(self, mean, std, generator);
}

static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen) {
static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::random_from_to_kernel(iter, range, base, generator);
}

static void random_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
static void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::random_kernel(iter, generator);
}
@ -228,7 +228,7 @@ static void random_kernel(TensorIteratorBase& iter, const std::optional<Generato
// This is the special kernel to handle single specific case:
// from(inclusive) = std::numeric_limits<int64_t>::lowest()
// to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::random_full_64_bits_range_kernel(iter, generator);
}

@ -58,10 +58,10 @@ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) {

template<typename RNG>
struct RandomFromToKernel {
void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
}
void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
}
};
@ -79,7 +79,7 @@ void random_kernel(TensorIteratorBase& iter, RNG generator) {

template<typename RNG>
struct RandomKernel {
void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
random_kernel(iter, check_generator<RNG>(gen));
}
};
@ -200,7 +200,7 @@ void normal_kernel(const TensorBase &self, double mean, double std, RNG generato

template<typename RNG>
struct NormalKernel {
void operator()(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
void operator()(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
normal_kernel(self, mean, std, check_generator<RNG>(gen));
}
};
@ -222,7 +222,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gene

template<typename RNG>
struct UniformKernel {
void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
uniform_kernel(iter, from, to, check_generator<RNG>(gen));
}
};
@ -242,7 +242,7 @@ void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG ge

template<typename RNG>
struct CauchyKernel {
void operator()(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
}
};
@ -262,7 +262,7 @@ void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, RNG ge

template<typename RNG>
struct LogNormalKernel {
void operator()(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
}
};
@ -282,7 +282,7 @@ void geometric_kernel(TensorIteratorBase& iter, double p, RNG generator) {

template<typename RNG>
struct GeometricKernel {
void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
geometric_kernel(iter, p, check_generator<RNG>(gen));
}
};
@ -303,7 +303,7 @@ void exponential_kernel(TensorIteratorBase& iter, double lambda, RNG generator)

template<typename RNG>
struct ExponentialKernel {
void operator()(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
exponential_kernel(iter, lambda, check_generator<RNG>(gen));
}
};
@ -358,10 +358,10 @@ void bernoulli_kernel(const TensorBase &self, double p, RNG generator) {

template<typename RNG>
struct BernoulliKernel {
void operator()(const TensorBase &self, double p, const std::optional<Generator>& gen) {
void operator()(const TensorBase &self, double p, c10::optional<Generator> gen) {
bernoulli_kernel(self, p, check_generator<RNG>(gen));
}
void operator()(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen) {
void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
bernoulli_kernel(self, p_, check_generator<RNG>(gen));
}
};

@ -24,7 +24,7 @@ multinomial_with_replacement_apply(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
const std::optional<Generator>& generator) {
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CPUGeneratorImpl>(
generator, detail::getDefaultCPUGenerator());
// See Note [Acquire lock when using random generators]
@ -128,7 +128,7 @@ multinomial_with_replacement_apply(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
const std::optional<Generator>& generator) {
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CPUGeneratorImpl>(
generator, detail::getDefaultCPUGenerator());
// See Note [Acquire lock when using random generators]
@ -230,7 +230,7 @@ static void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
const std::optional<Generator>& gen) {
c10::optional<Generator> gen) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "multinomial", [&] {
multinomial_with_replacement_apply<scalar_t>(

@ -23,12 +23,12 @@

namespace at::native {

void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen_) {
void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen_) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
}

void bernoulli_scalar_kernel(const TensorBase &self, double p, const std::optional<Generator>& gen) {
void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
auto iter = TensorIterator::borrowing_nullary_op(self);
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(iter, p, generator);

@ -5,7 +5,7 @@

namespace at::native {

void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
}

@ -5,7 +5,7 @@

namespace at::native {

void exponential_kernel(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
}

@ -5,7 +5,7 @@

namespace at::native {

void geometric_kernel(TensorIteratorBase& iter, double p_, const std::optional<Generator>& gen) {
void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::geometric_kernel(iter, p_, generator);
}

@ -5,7 +5,7 @@

namespace at::native {

void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);
}

@ -5,7 +5,7 @@

namespace at::native {

void normal_kernel(const TensorBase &self, double mean, double std, const std::optional<Generator>& gen) {
void normal_kernel(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::normal_kernel(self, mean, std, generator);
}

@ -5,17 +5,17 @@

namespace at::native {

void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen_) {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}

void random_full_64_bits_range_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen_) {
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}

void random_kernel(TensorIteratorBase& iter, const std::optional<Generator>& gen_) {
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}

@ -352,10 +352,10 @@ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) {

template<typename RNG>
struct RandomFromToKernel {
void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
}
void operator()(TensorIteratorBase& iter, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
}
};
@ -448,7 +448,7 @@ void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) {

template<typename RNG>
struct NormalKernel {
void operator()(const TensorBase &self, double mean, double std, const std::optional<Generator>& gen) {
void operator()(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
normal_kernel(self, mean, std, check_generator<RNG>(gen));
}
};
@ -481,7 +481,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen)

template<typename RNG>
struct UniformKernel {
void operator()(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
uniform_kernel(iter, from, to, check_generator<RNG>(gen));
}
};
@ -504,7 +504,7 @@ void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG

template<typename RNG>
struct LogNormalKernel {
void operator()(TensorIteratorBase& iter, double mean, double std, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
}
};
@ -525,7 +525,7 @@ void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) {

template<typename RNG>
struct GeometricKernel {
void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
geometric_kernel(iter, p, check_generator<RNG>(gen));
}
};
@ -548,7 +548,7 @@ void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) {

template<typename RNG>
struct ExponentialKernel {
void operator()(TensorIteratorBase& iter, double lambda, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
exponential_kernel(iter, lambda, check_generator<RNG>(gen));
}
};
@ -571,7 +571,7 @@ void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG

template<typename RNG>
struct CauchyKernel {
void operator()(TensorIteratorBase& iter, double median, double sigma, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
}
};
@ -661,10 +661,10 @@ void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) {

template<typename RNG>
struct BernoulliKernel {
void operator()(TensorIteratorBase& iter, double p, const std::optional<Generator>& gen) {
void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
bernoulli_kernel(iter, p, check_generator<RNG>(gen));
}
void operator()(const TensorBase &self, const TensorBase &p_, const std::optional<Generator>& gen) {
void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
bernoulli_kernel(self, p_, check_generator<RNG>(gen));
}
};

@ -5,7 +5,7 @@

namespace at::native {

void uniform_kernel(TensorIteratorBase& iter, double from, double to, const std::optional<Generator>& gen) {
void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
templates::cuda::uniform_kernel(iter, from, to, generator);
}

@ -18,14 +18,14 @@

namespace at::native {

Tensor _s_poisson_cuda(const Tensor& lambda, const std::optional<Generator>& gen_) {
Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty(lambda.sizes(), lambda.options());
launch_poisson_cuda_kernel(ret, lambda, gen);
return ret;
}

Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, const std::optional<Generator>& gen_) {
Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty(count.sizes(), count.options());
at::TensorIterator iter = at::TensorIteratorConfig()
@ -37,14 +37,14 @@ Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, const std::opti
return ret;
}

Tensor _s_gamma_cuda(const Tensor& alpha, const std::optional<Generator>& gen_) {
Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty(alpha.sizes(), alpha.options());
launch_gamma_kernel(ret, alpha, gen);
return ret;
}

Tensor _s_dirichlet_cuda(const Tensor& alpha, const std::optional<Generator>& gen_) {
Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty(alpha.sizes(), alpha.options());
launch_gamma_kernel(ret, alpha, gen);

@ -387,7 +387,7 @@ native_dropout_cuda(const Tensor& self, double p, c10::optional<bool> train){

// TODO: _fused_dropout_cuda is to be removed, see PR #63937
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, const std::optional<Generator>& gen_){
fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
return dropout_cuda<uint8_t>(gen, self, p);
}

@ -328,7 +328,7 @@ void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
const std::optional<Generator>& generator) {
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());

int inputSize = self.dim();

@ -55,7 +55,7 @@ namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}

Tensor& randperm_out_cuda(int64_t n, const std::optional<Generator>& generator, Tensor& result) {
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);

check_supported_max_int_with_precision(n, result);

@ -40,7 +40,7 @@ __global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T

// See note [Algorithm of randperm]
template<typename T, typename scalar_t>
void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, const std::optional<at::Generator> &gen_) {
void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional<at::Generator> &gen_) {
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(gen_, at::cuda::detail::getDefaultCUDAGenerator());
int64_t counter_offset = n;
at::PhiloxCudaState rng_engine_inputs;

@ -74,7 +74,7 @@ inline void _rrelu_with_noise_cuda_train(
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
const std::optional<Generator>& generator) {
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
@ -142,7 +142,7 @@ Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Scalar& lower,
const Scalar& upper,
bool training,
const std::optional<Generator>& generator,
c10::optional<Generator> generator,
Tensor& output) {
at::native::resize_output(output, self.sizes());

@ -176,7 +176,7 @@ Tensor rrelu_with_noise_cuda(
const Scalar& lower,
const Scalar& upper,
bool training,
const std::optional<Generator>& generator) {
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
@ -187,7 +187,7 @@ Tensor& rrelu_with_noise_cuda_(
const Scalar& lower,
const Scalar& upper,
bool training,
const std::optional<Generator>& generator) {
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}

@ -52,7 +52,7 @@ Tensor& random_mps_impl(Tensor& self,
const c10::optional<Tensor>& mean_opt,
const c10::optional<Tensor>& std_opt,
MPSGraphRandomDistribution distribution,
const std::optional<Generator>& gen,
c10::optional<Generator> gen,
std::string op_name,
RandomOpBlock randomBlock) {
if (self.numel() == 0) {
@ -144,7 +144,7 @@ static Tensor& normal_mps_impl(Tensor& self,
double std_s,
const c10::optional<Tensor>& mean_opt,
const c10::optional<Tensor>& std_opt,
const std::optional<Generator>& gen,
c10::optional<Generator> gen,
std::string op_name) {
const Tensor& std_t = *(at::borrow_from_optional_tensor(std_opt));
const Tensor& mean_t = *(at::borrow_from_optional_tensor(mean_opt));
@ -198,7 +198,7 @@ static Tensor& normal_mps_impl(Tensor& self,

static Tensor& bernoulli_mps_impl(Tensor& self,
const Tensor& prob_t,
const std::optional<Generator>& gen,
c10::optional<Generator> gen,
std::string op_name) {
TORCH_CHECK(prob_t.is_same_size(self) || prob_t.dim() == 0,
op_name,
@ -225,7 +225,7 @@ static Tensor& bernoulli_mps_impl(Tensor& self,

} // namespace mps

Tensor& uniform_mps_(Tensor& self, double from, double to, const std::optional<Generator>& gen) {
Tensor& uniform_mps_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
auto scalar_type = self.scalar_type();
if (scalar_type == ScalarType::ComplexFloat)
scalar_type = ScalarType::Float;
@ -257,16 +257,16 @@ Tensor& uniform_mps_(Tensor& self, double from, double to, const std::optional<G
self, from, to, c10::nullopt, c10::nullopt, MPSGraphRandomDistributionUniform, gen, __func__, nullptr);
}

Tensor& normal_mps_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
Tensor& normal_mps_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
return mps::normal_mps_impl(self, mean, std, c10::nullopt, c10::nullopt, gen, "normal");
}

Tensor normal_mps(const Tensor& mean, double std, const std::optional<Generator>& gen) {
Tensor normal_mps(const Tensor& mean, double std, c10::optional<Generator> gen) {
Tensor self = at::empty(mean.sizes(), mean.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
return mps::normal_mps_impl(self, 0.0, std, mean, c10::nullopt, gen, "normal");
}

Tensor normal_mps(double mean, const Tensor& std, const std::optional<Generator>& gen) {
Tensor normal_mps(double mean, const Tensor& std, c10::optional<Generator> gen) {
Tensor self = at::empty(std.sizes(), std.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
// when there's no tensor-type mean, we cannot pass scalar mean value due to the order of
// multiply/add ops in random computation. So we create a mean tensor instead.
@ -274,45 +274,45 @@ Tensor normal_mps(double mean, const Tensor& std, const std::optional<Generator>
return mps::normal_mps_impl(self, 0.0, 1.0, mean_t, std, gen, "normal");
}

Tensor normal_mps(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
Tensor normal_mps(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
auto shape = at::infer_size(mean.sizes(), std.sizes());
Tensor self = at::empty(shape, mean.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
return mps::normal_mps_impl(self, 0.0, 1.0, mean, std, gen, "normal");
}

Tensor& normal_mps_out(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& self) {
Tensor& normal_mps_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& self) {
return mps::normal_mps_impl(self, 0.0, std, mean, c10::nullopt, gen, "normal");
}

Tensor& normal_mps_out(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& self) {
Tensor& normal_mps_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& self) {
// when there's no tensor-type mean, we cannot pass scalar mean value due to the order of
// multiply/add ops in random computation. So we create a mean tensor instead.
Tensor mean_t = at::full_like(self, Scalar(mean));
return mps::normal_mps_impl(self, 0.0, 1.0, mean_t, std, gen, "normal");
}

Tensor& normal_mps_out(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& self) {
Tensor& normal_mps_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& self) {
TORCH_CHECK(mean.numel() == std.numel(), "normal_mps_out: mean and std must have same number of elements")
return mps::normal_mps_impl(self, 0.0, 1.0, mean, std, gen, "normal");
}

Tensor& bernoulli_out_mps(const Tensor& p_, const std::optional<Generator>& gen, Tensor& result) {
Tensor& bernoulli_out_mps(const Tensor& p_, c10::optional<Generator> gen, Tensor& result) {
result.resize_(p_.sizes());
return mps::bernoulli_mps_impl(result, p_, gen, __func__);
}

Tensor& bernoulli_mps_(Tensor& self, double p, const std::optional<Generator>& gen) {
Tensor& bernoulli_mps_(Tensor& self, double p, c10::optional<Generator> gen) {
TORCH_CHECK(0.0 <= p && p <= 1.0, "bernoulli_mps_ expects p to be in [0, 1], but got p=", p);
Tensor prob_t = at::full({}, Scalar(p), c10::TensorOptions().dtype(kFloat).device(kMPS));
return mps::bernoulli_mps_impl(self, prob_t, gen, __func__);
}

Tensor& bernoulli_mps_(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
Tensor& bernoulli_mps_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
return mps::bernoulli_mps_impl(self, p_, gen, __func__);
}

// random_.from
Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, const std::optional<Generator>& gen) {
Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> gen) {
auto input_dtype = self.scalar_type();
int64_t to = 0;

@ -372,16 +372,16 @@ Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c
self, from, to - 1, c10::nullopt, c10::nullopt, MPSGraphRandomDistributionUniform, gen, __func__, nullptr);
}

Tensor& random_mps_(Tensor& self, int64_t to, const std::optional<Generator>& gen) {
Tensor& random_mps_(Tensor& self, int64_t to, c10::optional<Generator> gen) {
return random_mps_(self, 0, to, gen);
}

Tensor& random_mps_(Tensor& self, const std::optional<Generator>& gen) {
Tensor& random_mps_(Tensor& self, c10::optional<Generator> gen) {
return random_mps_(self, 0, c10::nullopt, gen);
}

// Exponential distribution
Tensor& exponential_mps_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
Tensor& exponential_mps_(Tensor& self, double lambda, c10::optional<Generator> gen) {
TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);

mps::RandomOpBlock random_op_block = ^RandomOpFn(cachedGraph, randomTensor) {
@ -405,7 +405,7 @@ Tensor& exponential_mps_(Tensor& self, double lambda, const std::optional<Genera
random_op_block);
}

Tensor& randperm_out_mps(int64_t n, const std::optional<Generator>& generator, Tensor& result) {
Tensor& randperm_out_mps(int64_t n, c10::optional<Generator> generator, Tensor& result) {
if (!is_macos_13_or_newer()) {
TORCH_WARN_ONCE("MPS: randperm op is supported natively starting from macOS 13.0. ",
"Falling back on CPU. This may have performance implications.");
@ -453,7 +453,7 @@ Tensor& randperm_out_mps(int64_t n, const std::optional<Generator>& generator, T

static Tensor& multinomial_with_replacement_mps_kernel(const Tensor& self,
const int64_t n_sample,
const std::optional<Generator>& generator,
c10::optional<Generator> generator,
Tensor& result) {
using namespace mps;

@ -581,7 +581,7 @@ constexpr int64_t FLOAT32_MAX_CONSECUTIVE_INT = 1 << (FLT_MANT_DIG);
Tensor& multinomial_out_mps(const Tensor& self,
int64_t n_sample,
bool with_replacement,
const std::optional<Generator>& gen,
c10::optional<Generator> gen,
Tensor& result) {
TORCH_CHECK(result.device() == self.device(), "multinomial arguments must have the same device");
TORCH_CHECK(self.dim() > 0 && self.dim() <= 2, "prob_dist must be 1 or 2 dim");
@ -652,10 +652,7 @@ Tensor& multinomial_out_mps(const Tensor& self,
return result;
}

Tensor multinomial_mps(const Tensor& self,
int64_t n_sample,
bool with_replacement,
const std::optional<Generator>& gen) {
Tensor multinomial_mps(const Tensor& self, int64_t n_sample, bool with_replacement, c10::optional<Generator> gen) {
Tensor result = at::empty({0}, self.options().dtype(kLong));
multinomial_out_mps(self, n_sample, with_replacement, gen, result);
return result;

@ -931,7 +931,7 @@ Tensor reshape_as_nested(const Tensor& self, const Tensor& other) {
return self.reshape(sizes);
}

Tensor& normal_nested_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
Tensor& normal_nested_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
const auto& self_buf = get_nested_tensor_impl(self)->get_buffer();
self_buf.normal_(mean, std, gen);
return self;

@ -326,7 +326,7 @@ Tensor& normal_sparse_csr_(
Tensor& self,
double mean,
double std,
const std::optional<Generator>& gen) {
c10::optional<Generator> gen) {
return unary_op_inplace(self, &Tensor::normal_, mean, std, gen);
}

@ -354,7 +354,7 @@ mha_fwd(const at::Tensor &q, // batch_size x seqlen_q x num_heads x head
int window_size_left,
int window_size_right,
const bool return_softmax,
const std::optional<at::Generator>& gen_) {
c10::optional<at::Generator> gen_) {

auto dprops = at::cuda::getCurrentDeviceProperties();
// bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
@ -546,7 +546,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q
int window_size_left,
int window_size_right,
const bool return_softmax,
const std::optional<at::Generator>& gen_) {
c10::optional<at::Generator> gen_) {

auto dprops = at::cuda::getCurrentDeviceProperties();
// bool is_sm75 = dprops->major == 7 && dprops->minor == 5;

@ -19,7 +19,7 @@ mha_fwd(const at::Tensor &q, // batch_size x seqlen_q x num_heads x head
int window_size_left,
int window_size_right,
const bool return_softmax,
const std::optional<at::Generator>& gen_);
c10::optional<at::Generator> gen_);

std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor>
mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
@ -39,7 +39,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q
int window_size_left,
int window_size_right,
const bool return_softmax,
const std::optional<at::Generator>& gen_);
c10::optional<at::Generator> gen_);


std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>

@ -113,7 +113,7 @@ mha_fwd(const at::Tensor &q, // batch_size x seqlen_q x num_heads x head
int window_size_left,
int window_size_right,
const bool return_softmax,
const std::optional<at::Generator>& gen_) {
c10::optional<at::Generator> gen_) {
check_gpu_arch();

auto q_dtype = q.dtype();
@ -322,7 +322,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q
int window_size_left,
int window_size_right,
const bool return_softmax,
const std::optional<at::Generator>& gen_) {
c10::optional<at::Generator> gen_) {

TORCH_CHECK(false, "mha_varlen_fwd not supported on ROCm");

@ -16,7 +16,7 @@ Tensor& uniform_(
Tensor& self,
const double from,
const double to,
const std::optional<at::Generator>& /* not implemented */) {
const c10::optional<at::Generator> /* not implemented */) {
TORCH_CHECK(
self.is_vulkan(),
"Vulkan: In-place operator is only supported on Vulkan tensors.");
@ -75,7 +75,7 @@ Tensor& normal_(
Tensor& self,
const double mean,
const double std,
const std::optional<at::Generator>& /* not implemented */) {
const c10::optional<at::Generator> /* not implemented */) {
TORCH_CHECK(
self.is_vulkan(),
"Vulkan: In-place operator is only supported on Vulkan tensors.");

@ -44,89 +44,89 @@ struct TestCPUGenerator : public c10::GeneratorImpl {

// ==================================================== Random ========================================================

Tensor& random_(Tensor& self, const std::optional<Generator>& generator) {
Tensor& random_(Tensor& self, c10::optional<Generator> generator) {
return at::native::templates::random_impl<native::templates::cpu::RandomKernel, TestCPUGenerator>(self, generator);
}

Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& generator) {
Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> generator) {
return at::native::templates::random_from_to_impl<native::templates::cpu::RandomFromToKernel, TestCPUGenerator>(self, from, to, generator);
}

Tensor& random_to(Tensor& self, int64_t to, const std::optional<Generator>& generator) {
Tensor& random_to(Tensor& self, int64_t to, c10::optional<Generator> generator) {
return random_from_to(self, 0, to, generator);
}

// ==================================================== Normal ========================================================

Tensor& normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
return at::native::templates::normal_impl_<native::templates::cpu::NormalKernel, TestCPUGenerator>(self, mean, std, gen);
}

Tensor& normal_Tensor_float_out(const Tensor& mean, double std, const std::optional<Generator>& gen, Tensor& output) {
Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
return at::native::templates::normal_out_impl<native::templates::cpu::NormalKernel, TestCPUGenerator>(output, mean, std, gen);
}

Tensor& normal_float_Tensor_out(double mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
return at::native::templates::normal_out_impl<native::templates::cpu::NormalKernel, TestCPUGenerator>(output, mean, std, gen);
}

Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen, Tensor& output) {
Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
return at::native::templates::normal_out_impl<native::templates::cpu::NormalKernel, TestCPUGenerator>(output, mean, std, gen);
}

Tensor normal_Tensor_float(const Tensor& mean, double std, const std::optional<Generator>& gen) {
Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional<Generator> gen) {
return at::native::templates::normal_impl<native::templates::cpu::NormalKernel, TestCPUGenerator>(mean, std, gen);
}

Tensor normal_float_Tensor(double mean, const Tensor& std, const std::optional<Generator>& gen) {
Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional<Generator> gen) {
return at::native::templates::normal_impl<native::templates::cpu::NormalKernel, TestCPUGenerator>(mean, std, gen);
}

Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, const std::optional<Generator>& gen) {
Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
return at::native::templates::normal_impl<native::templates::cpu::NormalKernel, TestCPUGenerator>(mean, std, gen);
}

// ==================================================== Uniform =======================================================

Tensor& uniform_(Tensor& self, double from, double to, const std::optional<Generator>& generator) {
Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator> generator) {
return at::native::templates::uniform_impl_<native::templates::cpu::UniformKernel, TestCPUGenerator>(self, from, to, generator);
}

// ==================================================== Cauchy ========================================================

Tensor& cauchy_(Tensor& self, double median, double sigma, const std::optional<Generator>& generator) {
Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generator> generator) {
return at::native::templates::cauchy_impl_<native::templates::cpu::CauchyKernel, TestCPUGenerator>(self, median, sigma, generator);
}

// ================================================== LogNormal =======================================================

Tensor& log_normal_(Tensor& self, double mean, double std, const std::optional<Generator>& gen) {
Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
return at::native::templates::log_normal_impl_<native::templates::cpu::LogNormalKernel, TestCPUGenerator>(self, mean, std, gen);
}

// ================================================== Geometric =======================================================

Tensor& geometric_(Tensor& self, double p, const std::optional<Generator>& gen) {
Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
return at::native::templates::geometric_impl_<native::templates::cpu::GeometricKernel, TestCPUGenerator>(self, p, gen);
}

// ================================================== Exponential =====================================================

Tensor& exponential_(Tensor& self, double lambda, const std::optional<Generator>& gen) {
Tensor& exponential_(Tensor& self, double lambda, c10::optional<Generator> gen) {
return at::native::templates::exponential_impl_<native::templates::cpu::ExponentialKernel, TestCPUGenerator>(self, lambda, gen);
}

// ================================================== Bernoulli =======================================================

Tensor& bernoulli_Tensor(Tensor& self, const Tensor& p_, const std::optional<Generator>& gen) {
Tensor& bernoulli_Tensor(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
return at::native::templates::bernoulli_impl_<native::templates::cpu::BernoulliKernel, TestCPUGenerator>(self, p_, gen);
}

Tensor& bernoulli_float(Tensor& self, double p, const std::optional<Generator>& gen) {
Tensor& bernoulli_float(Tensor& self, double p, c10::optional<Generator> gen) {
return at::native::templates::bernoulli_impl_<native::templates::cpu::BernoulliKernel, TestCPUGenerator>(self, p, gen);
}

Tensor& bernoulli_out(const Tensor& self, const std::optional<Generator>& gen, Tensor& result) {
Tensor& bernoulli_out(const Tensor& self, c10::optional<Generator> gen, Tensor& result) {
return at::native::templates::bernoulli_out_impl<native::templates::cpu::BernoulliKernel, TestCPUGenerator>(result, self, gen);
}

@ -174,7 +174,7 @@ TEST(RandomPermutationTest, TestIslandShuffle) {
bool shuffled2 = false;
for (int i = 0; i < 100; i++) {
cudaDeviceSynchronize();
std::optional<at::Generator> gen = c10::nullopt;
c10::optional<at::Generator> gen = c10::nullopt;
randperm_handle_duplicate_keys(keys, values, 8, 5, gen);
cudaDeviceSynchronize();
std::vector<int> slice1 = {values[0], values[1], values[2]};

@ -33,15 +33,15 @@ struct TestCPUGenerator : public c10::GeneratorImpl {
uint64_t value_;
};

Tensor& random_(Tensor& self, const std::optional<Generator>& generator) {
Tensor& random_(Tensor& self, c10::optional<Generator> generator) {
return at::native::templates::random_impl<native::templates::cpu::RandomKernel, TestCPUGenerator>(self, generator);
}

Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, const std::optional<Generator>& generator) {
Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to, c10::optional<Generator> generator) {
return at::native::templates::random_from_to_impl<native::templates::cpu::RandomFromToKernel, TestCPUGenerator>(self, from, to, generator);
}

Tensor& random_to(Tensor& self, int64_t to, const std::optional<Generator>& generator) {
Tensor& random_to(Tensor& self, int64_t to, c10::optional<Generator> generator) {
return random_from_to(self, 0, to, generator);
}

@ -678,7 +678,7 @@ void addInputs(
void addInputs(
Node* n,
const char* name,
const std::optional<at::Generator>& value) {
const c10::optional<at::Generator>& value) {
Graph* g = n->owningGraph();

if (value.has_value() && value->defined()) {

@ -340,7 +340,7 @@ TORCH_API void addInputs(
TORCH_API void addInputs(
Node* n,
const char* name,
const std::optional<at::Generator>& value);
const c10::optional<at::Generator>& value);

inline void addInputs(
Node* n,

@ -406,7 +406,7 @@ RegisterOperators reg({
double a;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double b;
std::optional<at::Generator> generator =
c10::optional<at::Generator> generator =
pop(stack).toOptional<at::Generator>();

pop(stack, tensor, a, b);
@ -425,7 +425,7 @@ RegisterOperators reg({
double mean;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double std;
std::optional<at::Generator> generator =
c10::optional<at::Generator> generator =
pop(stack).toOptional<at::Generator>();

pop(stack, tensor, mean, std);

@ -177,14 +177,14 @@ std::vector<Shape> compute_shape_abs(const at::Tensor& self) {

std::vector<Shape> compute_shape_bernoulli(
const at::Tensor& self,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return {Shape(self.scalar_type(), self.sizes().vec())};
}

std::vector<Shape> compute_shape_bernoulli(
const at::Tensor& self,
double p,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return compute_shape_bernoulli(self, generator);
}

@ -692,14 +692,14 @@ std::vector<Shape> compute_shape_native_dropout_backward(

std::vector<Shape> compute_shape_random(
const at::Tensor& self,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return {Shape(self.scalar_type(), self.sizes().vec())};
}

std::vector<Shape> compute_shape_random(
const at::Tensor& self,
int64_t to,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return compute_shape_random(self, generator);
}

@ -707,7 +707,7 @@ std::vector<Shape> compute_shape_random(
const at::Tensor& self,
int64_t from,
c10::optional<int64_t> to,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return compute_shape_random(self, generator);
}

@ -1372,7 +1372,7 @@ std::vector<Shape> compute_shape_normal_functional(
const at::Tensor& self,
double mean,
double std,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return {Shape(self.scalar_type(), self.sizes().vec())};
}

@ -1380,7 +1380,7 @@ std::vector<Shape> compute_shape_uniform(
const at::Tensor& self,
double from,
double to,
const std::optional<at::Generator>& generator) {
c10::optional<at::Generator> generator) {
return {Shape(self.scalar_type(), self.sizes().vec())};
}

@ -24,8 +24,8 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d(con
TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);

@ -70,10 +70,10 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const
TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_nonzero(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);

@ -92,7 +92,7 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_narrow_copy_symint(const
TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_selu(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, const ::std::optional<at::Generator> & generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);

// Non-Native ops
TORCH_API std::vector<Shape> compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type);
@ -246,7 +246,7 @@ struct PythonArgs {
inline std::vector<int64_t> intlistWithDefault(
int i,
std::vector<int64_t> default_intlist);
inline std::optional<at::Generator> generator(int i);
inline c10::optional<at::Generator> generator(int i);
inline at::Storage storage(int i);
inline at::Storage storage(
int i,

@ -1069,7 +1069,7 @@ inline bool PythonArgs::isNone(int i) {
return args[i] == nullptr;
}

inline std::optional<at::Generator> PythonArgs::generator(int i) {
inline c10::optional<at::Generator> PythonArgs::generator(int i) {
if (!args[i])
return c10::nullopt;
return reinterpret_cast<THPGenerator*>(args[i])->cdata;
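The parser-side contract is visible in the hunk: an unset argument slot becomes an empty optional, and a set slot is unwrapped from the Python wrapper object. A simplified sketch of that lookup, with a plain pointer table standing in for the PyObject* slots and the THPGenerator unwrapping:

#include <iostream>
#include <optional>

struct Generator {
  int seed = 0;
};

// Toy stand-in for PythonArgs; the real struct holds PyObject* slots and
// returns reinterpret_cast<THPGenerator*>(args[i])->cdata.
struct ParsedArgs {
  Generator* args[4] = {};

  std::optional<Generator> generator(int i) {
    if (!args[i])
      return std::nullopt;  // c10::nullopt in the real code
    return *args[i];
  }
};

int main() {
  ParsedArgs parsed;
  Generator g{123};
  parsed.args[1] = &g;

  std::cout << parsed.generator(0).has_value() << "\n";  // 0: slot is empty
  std::cout << parsed.generator(1)->seed << "\n";        // 123
}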
@ -144,9 +144,6 @@ def argumenttype_type(
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        if isinstance(t, OptionalType) and not mutable:
            if str(t.elem) == "Generator":
                return NamedCType(binds, ConstRefCType(r.type))
        return r

    if isinstance(t, BaseType):
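The three deleted lines were the heart of the reverted change on the codegen side: a special case that emitted immutable optional Generator arguments as const references instead of by value. With it gone, optional Generator falls through to the generic optional handling. A hedged illustration of the two signature shapes torchgen would emit, using stand-in types rather than the real generated headers:

#include <iostream>
#include <optional>

struct Generator {
  int seed = 0;
};

// Stand-in alias for illustration; not the real c10 header.
namespace c10 {
template <typename T>
using optional = std::optional<T>;
}

// Signature shape with the special case in place (the reverted change):
int sample_const_ref(const ::std::optional<Generator>& generator) {
  return generator ? generator->seed : 0;
}

// Signature shape after this revert removes the special case:
int sample_by_value(c10::optional<Generator> generator) {
  return generator ? generator->seed : 0;
}

int main() {
  std::optional<Generator> g = Generator{5};
  std::cout << sample_const_ref(g) << " " << sample_by_value(g) << "\n";
}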
@ -12,7 +12,6 @@ if we want to generate code for another C++ library.
Add new types to `types.py` if these types are ATen/c10 related.
Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
"""

from dataclasses import dataclass
from typing import Dict

@ -32,6 +31,7 @@ from .types_base import (
    shortT,
)


TENSOR_LIST_LIKE_CTYPES = [
    "at::TensorList",
    "const c10::List<c10::optional<at::Tensor>> &",

@ -133,13 +133,9 @@ class OptionalCType(CType):

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # Do not pass `strip_ref` recursively.
        if "Generator" in self.elem.cpp_type():
            return f"::std::optional<{self.elem.cpp_type()}>"
        return f"c10::optional<{self.elem.cpp_type()}>"

    def cpp_type_registration_declarations(self) -> str:
        if "Generator" in self.elem.cpp_type_registration_declarations():
            return f"::std::optional<{self.elem.cpp_type_registration_declarations()}>"
        return f"c10::optional<{self.elem.cpp_type_registration_declarations()}>"

    def remove_const_ref(self) -> "CType":

@ -95,13 +95,11 @@ class ConstRefCType(CType):
    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        if isinstance(self.elem, ConstRefCType) or strip_ref:
        if strip_ref:
            return self.elem.cpp_type(strip_ref=strip_ref)
        return f"const {self.elem.cpp_type()} &"

    def cpp_type_registration_declarations(self) -> str:
        if isinstance(self.elem, ConstRefCType):
            return self.elem.cpp_type_registration_declarations()
        return f"const {self.elem.cpp_type_registration_declarations()} &"

    def remove_const_ref(self) -> "CType":
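The strip_ref flag in ConstRefCType.cpp_type is the string-level analogue of removing const-ref qualifiers from a C++ type; the isinstance branches being deleted here had let the reverted change tolerate nested ConstRefCType wrappers without emitting doubled qualifiers. Conceptually, the stripped result matches what std::remove_cvref_t (C++20) produces at the type level. A small sketch of that correspondence, under the assumption that the CType strings mirror real types:

#include <optional>
#include <type_traits>

struct Generator {};

// ConstRefCType(OptionalCType(generatorT)) renders roughly this C++ type:
using ConstRefOptGen = const std::optional<Generator>&;

// cpp_type(strip_ref=True) corresponds to dropping the reference and the
// const qualifier, i.e. std::remove_cvref_t at the type level.
static_assert(
    std::is_same_v<std::remove_cvref_t<ConstRefOptGen>,
                   std::optional<Generator>>,
    "strip_ref removes the const-ref wrapper");

int main() {}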
@ -16,7 +16,6 @@ from torchgen.api.translate import translate
from torchgen.api.types import (
    BaseCType,
    Binding,
    ConstRefCType,
    deviceT,
    DispatcherSignature,
    kernel_signature,

@ -246,9 +245,7 @@ class GenLazyIR(ABC):
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)

        ctor_args = [
            f"{ConstRefCType(i.lazy_type).cpp_type()} {i.name}" for i in all_args
        ]
        ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
        reuse_ctor_args = ", ".join(ctor_args)
        if self.use_lazy_shape and schema.properties.ShapePrecompute:
            ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
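After the revert, the lazy IR codegen goes back to building each constructor parameter with the literal template "const <cpp_type>& <name>" instead of routing through ConstRefCType, and the ShapePrecompute branch still appends a moved-in shapes vector. A hedged sketch of the kind of node constructor that template yields, with toy types in place of torch::lazy::Value, at::Generator, and torch::lazy::Shape:

#include <optional>
#include <vector>

// Toy stand-ins for the real lazy IR types.
struct Value {};
struct Generator {};
struct Shape {};
namespace c10 {
template <typename T>
using optional = std::optional<T>;
}

// Every schema argument is rendered as "const <lazy_type.cpp_type()>& <name>";
// the trailing shapes parameter comes from the ShapePrecompute branch.
struct BernoulliNode {
  BernoulliNode(
      const Value& /*self*/,
      const c10::optional<Generator>& /*generator*/,
      std::vector<Shape>&& /*shapes*/) {}
};

int main() {
  BernoulliNode node(Value{}, c10::optional<Generator>{}, std::vector<Shape>{});
}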