c10::string_view -> std::string_view in autograd (#142354)

Differential Revision: D66939966

Pull Request resolved: https://github.com/pytorch/pytorch/pull/142354
Approved by: https://github.com/Skylion007
Author: Richard Barnes
Date: 2024-12-10 15:43:39 +00:00
Committed by: PyTorch MergeBot
Parent: 7e41717a26
Commit: 882b6af219

6 changed files with 29 additions and 29 deletions
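Note: this diff mechanically renames the string-view type used across autograd; every hunk below is a one-line removal paired with a one-line addition, matching the 29/29 count above. The rename assumes the two types are drop-in interchangeable at these call sites, which the 1:1 shape of the diff reflects. For readers unfamiliar with the type, a minimal standalone C++17 sketch of the semantics involved (not PyTorch code):

#include <string_view>

// A string_view is a non-owning (pointer, length) view over existing
// character data; passing one by value copies no characters.
bool is_empty_name(std::string_view name) {
  return name.empty();
}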

View File

@@ -965,7 +965,7 @@ def saved_variables(
if nctype.type == OptionalCType(BaseCType(stringT)):
formula = re.sub(
rf"\b{name}\b",
f"{name}.has_value() ? std::optional<c10::string_view>({name}.value()) : std::nullopt",
f"{name}.has_value() ? std::optional<std::string_view>({name}.value()) : std::nullopt",
formula,
)
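Note: the substituted expression relies on a standard ternary/conversion detail that the rename leaves unchanged. A minimal sketch of the C++ shape the codegen emits (hypothetical function and parameter names, not generated PyTorch output):

#include <optional>
#include <string_view>

std::optional<std::string_view> forward_arg(
    const std::optional<std::string_view>& rounding_mode) {
  // The true branch fixes the ternary's type to std::optional<std::string_view>;
  // std::nullopt then converts to that optional type in the false branch.
  return rounding_mode.has_value()
      ? std::optional<std::string_view>(rounding_mode.value())
      : std::nullopt;
}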

View File

@@ -606,7 +606,7 @@ Tensor div_tensor_self_backward(
const Tensor& grad,
T other,
ScalarType self_st,
-const std::optional<c10::string_view>& rounding_mode) {
+const std::optional<std::string_view>& rounding_mode) {
if (rounding_mode.has_value()) {
return at::zeros_like(grad, grad.options().dtype(self_st));
}
@@ -618,18 +618,18 @@ template Tensor div_tensor_self_backward(
const Tensor&,
Tensor,
ScalarType,
-const std::optional<c10::string_view>&);
+const std::optional<std::string_view>&);
template Tensor div_tensor_self_backward(
const Tensor&,
Scalar,
ScalarType,
-const std::optional<c10::string_view>&);
+const std::optional<std::string_view>&);
Tensor div_tensor_other_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& other,
-const std::optional<c10::string_view>& rounding_mode) {
+const std::optional<std::string_view>& rounding_mode) {
if (rounding_mode.has_value()) {
return at::zeros_like(grad, grad.options().dtype(other.scalar_type()));
}
@@ -1397,7 +1397,7 @@ Tensor convolution_backward_jvp_grad_bias(
// input_name Name of `input` tensor, from derivative formula
at::SymIntArrayRef strides_or_error(
const Tensor& input,
-c10::string_view const& input_name) {
+std::string_view const& input_name) {
// TODO: Ideally, this function would never be called if requires_grad is
// not set. Once codegen is updated to avoid the call, we can remove this
// check.
@@ -3282,7 +3282,7 @@ Tensor gelu_double_backward(
const Tensor& ggI,
const Tensor& gO,
const Tensor& input,
-c10::string_view approximate) {
+std::string_view approximate) {
// if (at::native::get_gelutype_enum(approximate) ==
// at::native::GeluType::Tanh) {
if (approximate == "tanh") {
@@ -3884,7 +3884,7 @@ std::tuple<Tensor, Tensor> linalg_qr_jvp(
const Tensor& dA,
const Tensor& Q,
const Tensor& R,
-const c10::string_view mode) {
+const std::string_view mode) {
// dA = dQR + QdR
//
// Case m >= n
@@ -3978,7 +3978,7 @@ Tensor linalg_qr_backward(
const Tensor& gR,
const Tensor& Q,
const Tensor& R,
-const c10::string_view mode) {
+const std::string_view mode) {
// Nb. We won't be too formal below, as writing this proof formally is a pain
// We'll link here a formal writing of all this at some point in the future
//
@@ -6767,7 +6767,7 @@ Tensor scatter_reduce_jvp(
const Tensor& index,
const Tensor& src_p,
const Tensor& src_t,
-c10::string_view reduce,
+std::string_view reduce,
bool include_self,
const Tensor& result) {
if (reduce == "sum" || reduce == "mean") {
@@ -6800,7 +6800,7 @@ std::tuple<Tensor, Tensor> scatter_reduce_backward(
int dim,
const Tensor& index,
const Tensor& src,
-c10::string_view reduce,
+std::string_view reduce,
bool include_self,
const Tensor& result) {
Tensor grad_self, grad_src;
@@ -6900,7 +6900,7 @@ std::tuple<Tensor, Tensor> index_reduce_backward(
int dim,
const Tensor& index,
const Tensor& source,
-c10::string_view reduce,
+std::string_view reduce,
bool include_self,
const Tensor& result) {
Tensor grad_self, grad_src;
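Note: the comparisons against string literals visible above (approximate == "tanh", reduce == "sum") compile unchanged after the rename, because std::string_view compares by content. A standalone C++17 sketch:

#include <string_view>

bool reduces_linearly(std::string_view reduce) {
  // operator== compares characters, not pointers, and accepts literals.
  return reduce == "sum" || reduce == "mean";
}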

View File

@@ -145,12 +145,12 @@ at::Tensor div_tensor_self_backward(
const Tensor& grad,
T other,
ScalarType self_st,
-const std::optional<c10::string_view>& rounding_mode = std::nullopt);
+const std::optional<std::string_view>& rounding_mode = std::nullopt);
at::Tensor div_tensor_other_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& other,
-const std::optional<c10::string_view>& rounding_mode = std::nullopt);
+const std::optional<std::string_view>& rounding_mode = std::nullopt);
at::Tensor mvlgamma_backward(
const at::Tensor& grad,
const at::Tensor& self,
@@ -291,7 +291,7 @@ at::Tensor clamp_jvp(
const Tensor& max_t);
at::SymIntArrayRef strides_or_error(
const Tensor& input,
-c10::string_view const& input_name);
+std::string_view const& input_name);
at::Tensor mm_mat1_backward(
const Tensor& grad,
const Tensor& mat2,
@@ -684,13 +684,13 @@ std::tuple<Tensor, Tensor> linalg_qr_jvp(
const Tensor& dA,
const Tensor& Q,
const Tensor& R,
-const c10::string_view mode);
+const std::string_view mode);
Tensor linalg_qr_backward(
const Tensor& gQ,
const Tensor& gR,
const Tensor& Q,
const Tensor& R,
-const c10::string_view mode);
+const std::string_view mode);
Tensor linalg_matrix_exp_differential(
const Tensor& self,
const Tensor& grad,
@@ -768,7 +768,7 @@ Tensor gelu_double_backward(
const Tensor& ggI,
const Tensor& gO,
const Tensor& input,
-c10::string_view approximate);
+std::string_view approximate);
Tensor as_strided_backward(
Tensor grad,
const TensorGeometry& input_geometry,
@@ -1037,7 +1037,7 @@ Tensor scatter_reduce_jvp(
const Tensor& index,
const Tensor& src_p,
const Tensor& src_t,
-c10::string_view reduce,
+std::string_view reduce,
bool include_self,
const Tensor& result);
@@ -1047,7 +1047,7 @@ std::tuple<Tensor, Tensor> scatter_reduce_backward(
int dim,
const Tensor& index,
const Tensor& src,
-c10::string_view reduce,
+std::string_view reduce,
bool include_self,
const Tensor& result);
@@ -1061,7 +1061,7 @@ std::tuple<Tensor, Tensor> index_reduce_backward(
int dim,
const Tensor& index,
const Tensor& source,
-c10::string_view reduce,
+std::string_view reduce,
bool include_self,
const Tensor& result);
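Note: in the header, the `= std::nullopt` defaults survive the type swap, so callers that omit rounding_mode keep compiling. A minimal sketch of the pattern (hypothetical function, not the real backward helper):

#include <optional>
#include <string_view>

bool rounds_toward_zero(
    const std::optional<std::string_view>& rounding_mode = std::nullopt) {
  // A const char* argument converts implicitly via std::string_view, so both
  // rounds_toward_zero() and rounds_toward_zero("trunc") are valid calls.
  return rounding_mode.has_value() && *rounding_mode == "trunc";
}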

View File

@@ -413,7 +413,7 @@ class WrapperFunctor final : public c10::OperatorKernel {
template <class Return, class... Args>
Return run_jit_decomposition_with_args_for_jvp(
-c10::string_view name,
+std::string_view name,
const c10::OperatorHandle& opHandle,
c10::DispatchKeySet dispatchKeySet,
Args&&... args) {
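Note: taking the operator name by value as std::string_view, as above, keeps the usual argument kinds working at call sites. A standalone sketch (hypothetical function name):

#include <string>
#include <string_view>

void record_op(std::string_view name) {
  // ... e.g. log or look up `name`; the view must not outlive its source.
  (void)name;
}

void demo() {
  record_op("aten::add");         // string literal
  std::string s = "aten::mul";
  record_op(s);                   // std::string converts implicitly
}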

View File

@@ -539,7 +539,7 @@ static PyObject* set_autocast_enabled(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser(
{"set_autocast_enabled(c10::string_view device_type, bool enabled)",
{"set_autocast_enabled(std::string_view device_type, bool enabled)",
"set_autocast_enabled(bool enabled)"}); // this signature is depracated.
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
@@ -562,7 +562,7 @@ static PyObject* is_autocast_enabled(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser(
{"is_autocast_enabled(c10::string_view device_type)",
{"is_autocast_enabled(std::string_view device_type)",
"is_autocast_enabled()"}); // this signature is depracated.
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
@@ -585,7 +585,7 @@ static PyObject* get_autocast_dtype(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser(
{"get_autocast_dtype(c10::string_view device_type)"});
{"get_autocast_dtype(std::string_view device_type)"});
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto device_type = at::Device(r.string(0)).type();
@@ -600,7 +600,7 @@ static PyObject* set_autocast_dtype(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser(
{"set_autocast_dtype(c10::string_view device_type, ScalarType dtype)"});
{"set_autocast_dtype(std::string_view device_type, ScalarType dtype)"});
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto device_type = at::Device(r.string(0)).type();
@@ -632,7 +632,7 @@ static PyObject* is_autocast_available(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser(
{"_is_autocast_available(c10::string_view device_type)"});
{"_is_autocast_available(std::string_view device_type)"});
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto device_type = at::Device(r.string(0)).type();
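Note: a general C++17 caveat for reviews of such migrations (these hunks appear unaffected, assuming r.string(0) returns an owned std::string, as the at::Device construction above suggests): std::string_view does not convert implicitly to std::string, while the reverse direction is implicit.

#include <string>
#include <string_view>

void demo() {
  std::string_view sv = "cuda";
  std::string owned(sv);       // explicit construction required
  // std::string bad = sv;     // would not compile: conversion is explicit
  std::string s = "cpu";
  std::string_view view = s;   // implicit, valid only while `s` is alive
}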

View File

@@ -697,7 +697,7 @@ static PyObject* THPVariable_make_subclass(
PyObject* kwargs) {
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_make_subclass(PyObject* cls, Tensor data, bool require_grad=False, *, c10::string_view? dispatch_sizes_strides_policy=None, bool dispatch_device=False, bool dispatch_layout=False, Device? device_for_backend_keys=None)",
"_make_subclass(PyObject* cls, Tensor data, bool require_grad=False, *, std::string_view? dispatch_sizes_strides_policy=None, bool dispatch_device=False, bool dispatch_layout=False, Device? device_for_backend_keys=None)",
});
ParsedArgs<7> parsed_args{};
auto r = parser.parse(args, kwargs, parsed_args);
@@ -774,7 +774,7 @@ static PyObject* THPVariable_make_wrapper_subclass(
"_make_wrapper_subclass(PyObject* cls, SymIntArrayRef size, SymIntArrayRef? strides=None, "
"SymInt? storage_offset=None, MemoryFormat? memory_format=None, ScalarType dtype=None, "
"Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False, "
"c10::string_view? dispatch_sizes_strides_policy=None, bool dispatch_device=False, bool dispatch_layout=False, "
"std::string_view? dispatch_sizes_strides_policy=None, bool dispatch_device=False, bool dispatch_layout=False, "
"DispatchKeySet _extra_dispatch_keys=None, SymInt? storage_size=None)",
});
ParsedArgs<15> parsed_args{};