From 1fd119948ebde3b6e3864fa5ead03a4fffde8348 Mon Sep 17 00:00:00 2001
From: Xuehai Pan
Date: Wed, 1 Mar 2023 23:50:52 +0000
Subject: [PATCH] [3/3] Update `.pyi` Python stub files and enable `'UFMT'` linter (#95268)

Changes:

- #95200

1. Recognize `.py.in` and `.pyi.in` files as Python in VS Code for a better development experience.
2. Fix deep setting merge in `tools/vscode_settings.py`.

- #95267

3. Use `NamedTuple` rather than `namedtuple + __annotations__` for `torch.nn.utils.rnn.PackedSequence_`:

`namedtuple + __annotations__`:

```python
PackedSequence_ = namedtuple('PackedSequence_',
                             ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices'])

# type annotation for PackedSequence_ to make it compatible with TorchScript
PackedSequence_.__annotations__ = {'data': torch.Tensor, 'batch_sizes': torch.Tensor,
                                   'sorted_indices': Optional[torch.Tensor],
                                   'unsorted_indices': Optional[torch.Tensor]}
```

`NamedTuple` (Python 3.6+):

```python
class PackedSequence_(NamedTuple):
    data: torch.Tensor
    batch_sizes: torch.Tensor
    sorted_indices: Optional[torch.Tensor]
    unsorted_indices: Optional[torch.Tensor]
```

- => this PR: #95268

4. Sort import statements and remove unnecessary imports in `.pyi`, `.pyi.in` files.
5. Format `.pyi`, `.pyi.in` files and remove unnecessary ellipsis `...` in type stubs.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/95268
Approved by: https://github.com/huydhn
---
 .lintrunner.toml                           |   1 +
 .vscode/extensions.json                    |   3 +-
 .vscode/settings_recommended.json          |   3 +-
 torch/_C/_VariableFunctions.pyi.in         |  37 +-
 torch/_C/__init__.pyi.in                   | 932 +++++++++++++--------
 torch/_C/_autograd.pyi                     |  34 +-
 torch/_C/_cudnn.pyi                        |   2 +-
 torch/_C/_distributed_autograd.pyi         |   5 +-
 torch/_C/_distributed_c10d.pyi             |  19 +-
 torch/_C/_distributed_rpc.pyi              |  43 +-
 torch/_C/_distributed_rpc_testing.pyi      |   5 +-
 torch/_C/_dynamo/eval_frame.pyi            |   2 +-
 torch/_C/_functions.pyi                    |   5 +-
 torch/_C/_functorch.pyi                    |   4 +-
 torch/_C/_lazy.pyi                         |   1 +
 torch/_C/_lazy_ts_backend.pyi              |   9 +-
 torch/_C/_monitor.pyi                      |   9 +-
 torch/_C/_nn.pyi.in                        |  60 +-
 torch/_C/_profiler.pyi                     |   2 -
 torch/_C/return_types.pyi.in               |  30 +-
 torch/fx/__init__.pyi                      |  10 +-
 torch/nn/functional.pyi.in                 | 901 ++++++++++++--------
 torch/nn/parallel/__init__.pyi             |   2 +-
 torch/nn/parallel/common_types.pyi         |   5 +-
 torch/nn/parallel/data_parallel.pyi        |  26 +-
 torch/nn/parallel/parallel_apply.pyi       |  14 +-
 torch/nn/parallel/replicate.pyi            |  11 +-
 torch/nn/parallel/scatter_gather.pyi       |  24 +-
 torch/nn/parameter.pyi                     |  45 +-
 torch/nn/utils/rnn.pyi                     | 102 ++-
 torch/optim/__init__.pyi                   |   3 +-
 torch/optim/_multi_tensor/__init__.pyi     |   1 +
 torch/optim/adadelta.pyi                   |  10 +-
 torch/optim/adagrad.pyi                    |  11 +-
 torch/optim/adam.pyi                       |  19 +-
 torch/optim/adamax.pyi                     |  10 +-
 torch/optim/adamw.pyi                      |  19 +-
 torch/optim/asgd.pyi                       |  11 +-
 torch/optim/lbfgs.pyi                      |  15 +-
 torch/optim/lr_scheduler.pyi               | 170 +++-
 torch/optim/nadam.pyi                      |  11 +-
 torch/optim/optimizer.pyi                  |  10 +-
 torch/optim/radam.pyi                      |  10 +-
 torch/optim/rmsprop.pyi                    |  12 +-
 torch/optim/rprop.pyi                      |   9 +-
 torch/optim/sgd.pyi                        |  10 +-
 torch/optim/sparse_adam.pyi                |  10 +-
 torch/optim/swa_utils.pyi                  |  36 +-
 torch/utils/data/datapipes/datapipe.pyi.in |  42 +-
 49 files changed, 1819 insertions(+), 946 deletions(-)

diff --git a/.lintrunner.toml b/.lintrunner.toml
index 940dea358dd2..9450cddbe0d4 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -841,6 +841,7 @@ include_patterns = [
     'torch/_refs/**/*.py',
     'torch/_subclasses/**/*.py',
     'torch/_*.py',
+    'torch/**/*.pyi',
     'torch/testing/_internal/opinfo/**/*.py',
     'torchgen/**/*.py',
'torch/_functorch/make_functional.py', diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 9c4cf774157a..0125b886536e 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,5 +1,6 @@ { "recommendations": [ - "ms-python.python" + "ms-python.python", + "omnilib.ufmt" ] } diff --git a/.vscode/settings_recommended.json b/.vscode/settings_recommended.json index db356b7d16fe..2d12dd44c89c 100644 --- a/.vscode/settings_recommended.json +++ b/.vscode/settings_recommended.json @@ -4,7 +4,8 @@ }, "files.associations": { "*.py.in": "python", - "*.pyi.in": "python" + "*.pyi.in": "python", + "editor.defaultFormatter": "omnilib.ufmt" }, "files.eol": "\n", "files.insertFinalNewline": true, diff --git a/torch/_C/_VariableFunctions.pyi.in b/torch/_C/_VariableFunctions.pyi.in index 8a5a63837aa6..3c773f42a616 100644 --- a/torch/_C/_VariableFunctions.pyi.in +++ b/torch/_C/_VariableFunctions.pyi.in @@ -1,12 +1,37 @@ # ${generated_comment} -from torch import Tensor, Generator, strided, memory_format, contiguous_format, strided, inf -from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, Literal, TypeVar - -from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout, SymInt, Device -import torch - import builtins +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +import torch +from torch import contiguous_format, Generator, inf, memory_format, strided, Tensor +from torch.types import ( + _bool, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Device, + Number, + SymInt, +) ${function_hints} diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index b4f8510f6fc6..07664506cd11 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -1,33 +1,57 @@ # ${generated_comment} -import torch -from torch.package import PackageExporter -from torch import Tensor, inf -from torch.autograd.graph import Node as _Node +import builtins from enum import Enum from pathlib import Path from typing import ( - Any, BinaryIO, Callable, ContextManager, Dict, Iterable, Iterator, List, - NamedTuple, Optional, overload, Sequence, Tuple, TypeVar, Type, Union, - Literal, Generic, Set, AnyStr) - -from torch.types import ( - _int, _float, _bool, _dtype, _device, _qscheme, _size, _layout, Device, Number, Storage, SymInt, _dispatchkey + Any, + AnyStr, + BinaryIO, + Callable, + ContextManager, + Dict, + Generic, + Iterable, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, ) -from torch.storage import TypedStorage -import builtins +import torch +from torch import inf, Tensor +from torch.autograd.graph import Node as _Node +from torch.package import PackageExporter +from torch.storage import TypedStorage +from torch.types import ( + _bool, + _device, + _dispatchkey, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Device, + Number, + Storage, + SymInt, +) # This module is defined in torch/csrc/Module.cpp -from . import _nn as _nn -from . import _onnx as _onnx -from . import _VariableFunctions as _VariableFunctions -from . import _functorch as _functorch -from . import _lazy as _lazy -from . import _lazy_ts_backend as _lazy_ts_backend +from . 
import _functorch, _lazy, _lazy_ts_backend, _nn, _onnx, _VariableFunctions -T = TypeVar('T') +T = TypeVar("T") S = TypeVar("S", bound="torch.Tensor") # Defined in torch/csrc/Device.cpp @@ -40,17 +64,14 @@ class device: # THPDevice_pynew @overload def __init__(self, device: Union[_device, _int, str]) -> None: ... - @overload def __init__(self, type: str, index: _int) -> None: ... # Uncomment if we ever make torch.device a decorator # def __call__(self, func: T) -> T: ... - def __enter__(self) -> "device": ... - + def __enter__(self) -> device: ... def __exit__(self, exc_type, exc_val, exc_tb) -> None: ... - def __reduce__(self) -> Tuple[Any, ...]: ... # THPDevice_reduce # Defined in torch/csrc/Stream.cpp @@ -59,9 +80,7 @@ class Stream: device_index: _int device_type: _int - device: device # The device of the stream - - ... + device: device # The device of the stream # Defined in torch/csrc/Size.cpp class Size(Tuple[_int, ...]): @@ -69,21 +88,16 @@ class Size(Tuple[_int, ...]): @overload # type: ignore[override] def __getitem__(self: Size, key: _int) -> _int: ... - @overload def __getitem__(self: Size, key: slice) -> Size: ... - def numel(self: Size) -> _int: ... - ... - # Defined in torch/csrc/Dtype.cpp class dtype: # TODO: __reduce__ is_floating_point: _bool is_complex: _bool is_signed: _bool - ... # Defined in torch/csrc/TypeInfo.cpp class iinfo: @@ -106,28 +120,26 @@ class finfo: @overload def __init__(self, dtype: _dtype) -> None: ... - @overload def __init__(self) -> None: ... ${dtype_class_hints} # Defined in torch/csrc/Layout.cpp -class layout: - ... +class layout: ... # Defined in torch/csrc/utils/disable_torch_function.cpp def DisableTorchFunction(): ... def DisableTorchFunctionSubclass(): ... # Defined in torch/csrc/utils/tensor_layouts.cpp -strided : layout = ... -sparse_coo : layout = ... -sparse_csr : layout = ... -sparse_csc : layout = ... -sparse_bsr : layout = ... -sparse_bsc : layout = ... -_mkldnn : layout = ... +strided: layout = ... +sparse_coo: layout = ... +sparse_csr: layout = ... +sparse_csc: layout = ... +sparse_bsr: layout = ... +sparse_bsc: layout = ... +_mkldnn: layout = ... # Defined in torch/csrc/MemoryFormat.cpp class memory_format: ... @@ -149,45 +161,42 @@ per_channel_symmetric: qscheme = ... per_channel_affine_float_qparams: qscheme = ... # Defined in torch/csrc/autograd/python_function.cpp -class _FunctionBase: - ... +class _FunctionBase: ... # Defined in torch/csrc/autograd/python_legacy_variable.cpp class _LegacyVariableBase(Tensor): # inherits from Tensor to appease mypy def __init__( self, - data: Optional[Tensor]=..., - requires_grad: Optional[_bool]=..., - volatile: Optional[_bool]=..., - _grad_fn: Optional[_FunctionBase]=... + data: Optional[Tensor] = ..., + requires_grad: Optional[_bool] = ..., + volatile: Optional[_bool] = ..., + _grad_fn: Optional[_FunctionBase] = ..., ) -> None: ... # Defined in torch/csrc/jit/python/init.cpp class IODescriptor: ... - class JITException: ... class Future: - def __init__(self, devices: List[device]) -> None: ... - def done(self) -> _bool: ... - def value(self) -> Any: ... - def wait(self) -> Any: ... - def add_done_callback(self, callback: Callable) -> None: ... - def then(self, callback: Callable) -> Future: ... - def set_result(self, result: Any) -> None: ... - def _set_unwrap_func(self, callback: Callable) -> None: ... + def __init__(self, devices: List[device]) -> None: ... + def done(self) -> _bool: ... + def value(self) -> Any: ... + def wait(self) -> Any: ... 
+ def add_done_callback(self, callback: Callable) -> None: ... + def then(self, callback: Callable) -> Future: ... + def set_result(self, result: Any) -> None: ... + def _set_unwrap_func(self, callback: Callable) -> None: ... class _Await: - def __init__(self) -> None: ... - def fn(self) -> Callable: ... - def args(self) -> Tuple[Any, ...]: ... - def is_nowait(self) -> _bool: ... + def __init__(self) -> None: ... + def fn(self) -> Callable: ... + def args(self) -> Tuple[Any, ...]: ... + def is_nowait(self) -> _bool: ... def _jit_set_num_profiled_runs(num: _size) -> _size: ... # Defined in torch/csrc/jit/passes/mobile_optimizer_type.h -class _MobileOptimizerType: - ... +class _MobileOptimizerType: ... CONV_BN_FUSION: _MobileOptimizerType INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType @@ -203,50 +212,67 @@ def _awaitable_wait(aw: _Await) -> Any: ... def _awaitable_nowait(x: Any) -> _Await: ... def _collect_all(futures: List[Future]) -> Future: ... def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ... - def unify_type_list(types: List[JitType]) -> JitType: ... -def _freeze_module(module: ScriptModule, - preserved_attrs: List[str] = [], - freeze_interfaces: _bool = True, - preserveParameters: _bool = True) -> ScriptModule: ... +def _freeze_module( + module: ScriptModule, + preserved_attrs: List[str] = [], + freeze_interfaces: _bool = True, + preserveParameters: _bool = True, +) -> ScriptModule: ... def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ... -def _jit_pass_optimize_for_inference(module: 'torch.jit.ScriptModule', - other_methods: List[str] = []) -> None: ... +def _jit_pass_optimize_for_inference( + module: torch.jit.ScriptModule, + other_methods: List[str] = [], +) -> None: ... def _jit_pass_fold_frozen_conv_bn(graph: Graph): ... def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ... def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ... def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ... def _jit_pass_concat_frozen_linear(graph: Graph): ... def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ... -def _jit_pass_transpose_frozen_linear(graph:Graph): ... -def _jit_pass_remove_dropout(module: 'torch.jit.ScriptModule'): ... - +def _jit_pass_transpose_frozen_linear(graph: Graph): ... +def _jit_pass_remove_dropout(module: torch.jit.ScriptModule): ... def _is_tracing() -> _bool: ... def _jit_init() -> _bool: ... def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ... def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ... def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ... -def _get_operation_overload(op_name: str, op_overload_name: str) -> Tuple[Callable, Callable, List[Any]]: ... +def _get_operation_overload( + op_name: str, + op_overload_name: str, +) -> Tuple[Callable, Callable, List[Any]]: ... def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ... -def _jit_pass_optimize_for_mobile(module: 'torch.jit.ScriptModule', - optimization_blocklist: Set[_MobileOptimizerType], - preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ... -def _clone_module_with_class(module: 'torch.jit.ScriptModule', - ignored_methods: List[AnyStr], - ignored_attributes: List[AnyStr]) -> 'torch.jit.ScriptModule': ... -def _jit_pass_vulkan_optimize_for_mobile(module: 'torch.jit.ScriptModule', - optimization_blocklist: Set[_MobileOptimizerType], - preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ... 
-def _jit_pass_metal_optimize_for_mobile(module: 'torch.jit.ScriptModule', - preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ... +def _jit_pass_optimize_for_mobile( + module: torch.jit.ScriptModule, + optimization_blocklist: Set[_MobileOptimizerType], + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _clone_module_with_class( + module: torch.jit.ScriptModule, + ignored_methods: List[AnyStr], + ignored_attributes: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_vulkan_optimize_for_mobile( + module: torch.jit.ScriptModule, + optimization_blocklist: Set[_MobileOptimizerType], + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_metal_optimize_for_mobile( + module: torch.jit.ScriptModule, + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... def _jit_pass_inline(Graph) -> None: ... def _jit_pass_constant_propagation(Graph) -> None: ... def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ... def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ... def _jit_erase_non_input_shape_information(Graph) -> None: ... -def _jit_get_schemas_for_operator(name :str) -> List[FunctionSchema]: ... +def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ... def _jit_get_all_schemas() -> List[FunctionSchema]: ... -def _jit_check_alias_annotation(g: Graph, args: Tuple[Any, ...], unqualified_op_name: str): ... +def _jit_check_alias_annotation( + g: Graph, + args: Tuple[Any, ...], + unqualified_op_name: str, +): ... def _jit_can_fuse_on_cpu() -> _bool: ... def _jit_can_fuse_on_gpu() -> _bool: ... def _jit_can_fuse_on_cpu_legacy() -> _bool: ... @@ -269,36 +295,50 @@ def _jit_cat_wo_conditionals(optimize_cat: _bool): ... def _jit_opt_conditionals(opt_conds: _bool): ... def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ... def _jit_pass_erase_shape_information(graph: Graph): ... -def _jit_pass_fold_convbn(module: 'torch.jit.ScriptModule'): ... -def _jit_pass_insert_observers(module: 'torch.jit.ScriptModule', - method_name: str, - qconfig_dict: Dict[str, Any], - inplace: _bool, - quant_type: _int): ... -def _jit_pass_insert_quant_dequant(module: 'torch.jit.ScriptModule', - method_name: str, - inplace: _bool, - debug: _bool, - quant_type: _int): ... -def _jit_pass_insert_quant_dequant_for_ondevice_ptq(module: 'torch.jit.ScriptModule', - method_name: str, - inplace: _bool, - debug: _bool, - quant_type: _int): ... -def _jit_pass_quant_finalize(module: 'torch.jit.ScriptModule', - quant_type: _int, - preserved_attrs: Sequence[str]): ... -def _jit_pass_quant_finalize_for_ondevice_ptq(module: 'torch.jit.ScriptModule', - quant_type: _int, - method_name: str): ... -def _jit_pass_insert_observer_method_for_ondevice_ptq(module: 'torch.jit.ScriptModule', - method_name: str, - qconfig_dict: Dict[str, Any], - inplace: _bool, - quant_type: _int): ... +def _jit_pass_fold_convbn(module: torch.jit.ScriptModule): ... +def _jit_pass_insert_observers( + module: torch.jit.ScriptModule, + method_name: str, + qconfig_dict: Dict[str, Any], + inplace: _bool, + quant_type: _int, +): ... +def _jit_pass_insert_quant_dequant( + module: torch.jit.ScriptModule, + method_name: str, + inplace: _bool, + debug: _bool, + quant_type: _int, +): ... +def _jit_pass_insert_quant_dequant_for_ondevice_ptq( + module: torch.jit.ScriptModule, + method_name: str, + inplace: _bool, + debug: _bool, + quant_type: _int, +): ... 
+def _jit_pass_quant_finalize( + module: torch.jit.ScriptModule, + quant_type: _int, + preserved_attrs: Sequence[str], +): ... +def _jit_pass_quant_finalize_for_ondevice_ptq( + module: torch.jit.ScriptModule, + quant_type: _int, + method_name: str, +): ... +def _jit_pass_insert_observer_method_for_ondevice_ptq( + module: torch.jit.ScriptModule, + method_name: str, + qconfig_dict: Dict[str, Any], + inplace: _bool, + quant_type: _int, +): ... def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ... def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ... -def _jit_set_fusion_strategy(strategy: List[Tuple[str, _int]]) -> List[Tuple[str, _int]]: ... +def _jit_set_fusion_strategy( + strategy: List[Tuple[str, _int]], +) -> List[Tuple[str, _int]]: ... def _jit_try_infer_type(obj: Any) -> InferredType: ... def _jit_get_trigger_value(trigger_name: str) -> _int: ... @@ -312,18 +352,41 @@ def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ... def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ... def _jit_assert_is_instance(obj: Any, type: JitType): ... def _jit_clear_class_registry() -> None: ... -def _jit_set_emit_hooks(ModuleHook: Optional[Callable], FunctionHook: Optional[Callable]) -> None: ... +def _jit_set_emit_hooks( + ModuleHook: Optional[Callable], + FunctionHook: Optional[Callable], +) -> None: ... def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ... -def _load_for_lite_interpreter(filename: Union[str, Path], map_location: Union[_device, str, None]): ... -def _load_for_lite_interpreter_from_buffer(buffer: BinaryIO, map_location: Union[_device, str, None]): ... +def _load_for_lite_interpreter( + filename: Union[str, Path], + map_location: Union[_device, str, None], +): ... +def _load_for_lite_interpreter_from_buffer( + buffer: BinaryIO, + map_location: Union[_device, str, None], +): ... def _export_operator_list(module: LiteScriptModule): ... def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ... def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ... def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ... -def _backport_for_mobile(filename_input: Union[str, Path], filename_output: Union[str, Path], to_version: _int) -> None: ... -def _backport_for_mobile_from_buffer(buffer: BinaryIO, filename_output: Union[str, Path], to_version: _int) -> None: ... -def _backport_for_mobile_to_buffer(filename_input: Union[str, Path], to_version: _int) -> bytes:... -def _backport_for_mobile_from_buffer_to_buffer(buffer: BinaryIO, to_version: _int) -> bytes:... +def _backport_for_mobile( + filename_input: Union[str, Path], + filename_output: Union[str, Path], + to_version: _int, +) -> None: ... +def _backport_for_mobile_from_buffer( + buffer: BinaryIO, + filename_output: Union[str, Path], + to_version: _int, +) -> None: ... +def _backport_for_mobile_to_buffer( + filename_input: Union[str, Path], + to_version: _int, +) -> bytes: ... +def _backport_for_mobile_from_buffer_to_buffer( + buffer: BinaryIO, + to_version: _int, +) -> bytes: ... def _get_model_ops_and_info(filename: Union[str, Path]): ... def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ... def _get_mobile_model_contained_types(filename: Union[str, Path]): ... @@ -339,7 +402,7 @@ def _create_function_from_trace( var_lookup_fn: Callable[[Tensor], str], strict: _bool, force_outplace: _bool, - argument_names: List[str] + argument_names: List[str], ) -> Tuple[Graph, Stack]: ... 
def _create_function_from_trace_with_dict( qualname: str, @@ -348,7 +411,7 @@ def _create_function_from_trace_with_dict( var_lookup_fn: Callable[[Tensor], str], strict: _bool, force_outplace: _bool, - argument_names: List[str] + argument_names: List[str], ) -> Tuple[Graph, Stack]: ... def _jit_is_script_object(obj: Any) -> _bool: ... def _last_executed_optimized_graph() -> Graph: ... @@ -357,25 +420,56 @@ def _get_upgraders_map_size() -> _int: ... def _dump_upgraders_map() -> Dict[str, str]: ... def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ... def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ... -def merge_type_from_type_comment(decl: Decl, type_annotation_decl: Decl, is_method: _bool) -> Decl: ... +def merge_type_from_type_comment( + decl: Decl, + type_annotation_decl: Decl, + is_method: _bool, +) -> Decl: ... def parse_ir(input: str, parse_tensor_constants: _bool) -> Graph: ... def parse_schema(schema: str) -> FunctionSchema: ... def get_device(input: Tensor) -> _int: ... - -def _resolve_type_from_object(obj: Any, range: SourceRange, rcb: ResolutionCallback) -> JitType: ... +def _resolve_type_from_object( + obj: Any, + range: SourceRange, + rcb: ResolutionCallback, +) -> JitType: ... def _create_module_with_type(ty: JitType) -> ScriptModule: ... def _create_object_with_type(ty: ClassType) -> ScriptObject: ... def _run_emit_module_hook(m: ScriptModule): ... -def _replace_overloaded_method_decl(overload_decl: Decl, implementation_def: Def, new_name: str) -> Def: ... - +def _replace_overloaded_method_decl( + overload_decl: Decl, + implementation_def: Def, + new_name: str, +) -> Def: ... def _jit_pass_lower_all_tuples(graph: Graph) -> None: ... -def _jit_pass_onnx_set_dynamic_input_shape(graph: Graph, dynamic_axes: Dict[str, Dict[_int, str]], input_names: List[str]) -> None: ... -def _jit_pass_onnx_graph_shape_type_inference(graph: Graph, params_dict: Dict[str, IValue], opset_version: _int) -> None: ... -def _jit_pass_onnx_assign_output_shape(graph: Graph, tensors: List[Tensor], desc: IODescriptor, onnx_shape_inference: _bool, is_script: _bool, opset_version: _int) -> None: ... -def _jit_pass_onnx_remove_inplace_ops_for_onnx(graph: Graph, module: Optional[ScriptModule] = None) -> None: ... +def _jit_pass_onnx_set_dynamic_input_shape( + graph: Graph, + dynamic_axes: Dict[str, Dict[_int, str]], + input_names: List[str], +) -> None: ... +def _jit_pass_onnx_graph_shape_type_inference( + graph: Graph, + params_dict: Dict[str, IValue], + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_assign_output_shape( + graph: Graph, + tensors: List[Tensor], + desc: IODescriptor, + onnx_shape_inference: _bool, + is_script: _bool, + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_remove_inplace_ops_for_onnx( + graph: Graph, + module: Optional[ScriptModule] = None, +) -> None: ... def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ... def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ... -def _jit_pass_peephole(graph: Graph, disable_shape_peepholes: _bool = False) -> None: ... +def _jit_pass_peephole( + graph: Graph, + disable_shape_peepholes: _bool = False, +) -> None: ... def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ... def _jit_pass_fuse_addmm(graph: Graph) -> None: ... def _jit_pass_onnx_preprocess(graph: Graph) -> None: ... @@ -385,100 +479,150 @@ def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ... 
def _jit_pass_onnx_unpack_quantized_weights( graph: Graph, paramsDict: Dict[str, IValue], - caffe2: _bool + caffe2: _bool, ) -> Dict[str, IValue]: ... def _jit_pass_onnx_quantization_insert_permutes( graph: Graph, - paramsDict: Dict[str, IValue] + paramsDict: Dict[str, IValue], ) -> Dict[str, IValue]: ... -def _jit_pass_custom_pattern_based_rewrite_graph(pattern: str, fused_node_name: str, graph: Graph) -> None: ... -def _jit_onnx_list_model_parameters(module: ScriptModule) -> Tuple[ScriptModule, List[IValue]]: ... +def _jit_pass_custom_pattern_based_rewrite_graph( + pattern: str, + fused_node_name: str, + graph: Graph, +) -> None: ... +def _jit_onnx_list_model_parameters( + module: ScriptModule, +) -> Tuple[ScriptModule, List[IValue]]: ... def _jit_pass_erase_number_types(graph: Graph) -> None: ... def _jit_pass_onnx_lint(graph: Graph) -> None: ... -def _jit_pass_onnx(graph: Graph, _jit_pass_onnx: _onnx.OperatorExportTypes) -> Graph: ... -def _jit_pass_onnx_scalar_type_analysis(graph: Graph, lowprecision_cast: _bool, opset_version: _int) -> None: ... -def _jit_pass_onnx_peephole(graph: Graph, opset_version: _int, fixed_batch_size: _bool) -> None: ... +def _jit_pass_onnx( + graph: Graph, + _jit_pass_onnx: _onnx.OperatorExportTypes, +) -> Graph: ... +def _jit_pass_onnx_scalar_type_analysis( + graph: Graph, + lowprecision_cast: _bool, + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_peephole( + graph: Graph, + opset_version: _int, + fixed_batch_size: _bool, +) -> None: ... def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ... def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ... -def _jit_pass_onnx_function_extraction(graph: Graph, module_names : Set[str], param_names : List[str]) -> Dict[Node, Dict[str, str]]: ... +def _jit_pass_onnx_function_extraction( + graph: Graph, + module_names: Set[str], + param_names: List[str], +) -> Dict[Node, Dict[str, str]]: ... def _jit_pass_onnx_clear_scope_records() -> None: ... -def _jit_pass_onnx_track_scope_attributes(graph: Graph, onnx_attrs: Dict[str, Any]) -> None: ... +def _jit_pass_onnx_track_scope_attributes( + graph: Graph, + onnx_attrs: Dict[str, Any], +) -> None: ... def _jit_is_onnx_log_enabled() -> _bool: ... def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ... def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ... def _jit_onnx_log(*args: Any) -> None: ... def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ... def _jit_pass_inline_fork_wait(graph: Graph) -> None: ... -def _jit_pass_onnx_deduplicate_initializers(graph: Graph, params_dict: Dict[str, IValue], is_train: _bool) -> Dict[str, IValue]: ... -def _jit_pass_onnx_eval_peephole(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ... -def _jit_pass_onnx_constant_fold(graph: Graph, paramsDict: Dict[str, IValue], opset_version: _int) -> Dict[str, IValue]: ... -def _jit_pass_onnx_eliminate_unused_items(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ... +def _jit_pass_onnx_deduplicate_initializers( + graph: Graph, + params_dict: Dict[str, IValue], + is_train: _bool, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_eval_peephole( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_constant_fold( + graph: Graph, + paramsDict: Dict[str, IValue], + opset_version: _int, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_eliminate_unused_items( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... 
def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ... -def _jit_pass_filter_non_tensor_arguments(params: Dict[str, IValue]) -> Dict[str, Tensor]: ... +def _jit_pass_filter_non_tensor_arguments( + params: Dict[str, IValue], +) -> Dict[str, Tensor]: ... def _jit_decay_packed_param_input_types(graph: Graph) -> None: ... -def _jit_pass_onnx_node_shape_type_inference(n: Node, paramsDict: Dict[str, IValue], opset_version: _int) -> None: ... -def _jit_onnx_convert_pattern_from_subblock(block: Block, n: Node, env: Dict[Value, Value]) -> List[Value]: ... +def _jit_pass_onnx_node_shape_type_inference( + n: Node, + paramsDict: Dict[str, IValue], + opset_version: _int, +) -> None: ... +def _jit_onnx_convert_pattern_from_subblock( + block: Block, + n: Node, + env: Dict[Value, Value], +) -> List[Value]: ... def _jit_pass_onnx_block( old_block: Block, new_block: Block, operator_export_type: _onnx.OperatorExportTypes, env: Dict[Value, Value], - is_sub_block: _bool + is_sub_block: _bool, ) -> Dict[Value, Value]: ... def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ... -def _jit_pass_fixup_onnx_controlflow_node(n: Node, opset_version: _int) -> List[Value]: ... +def _jit_pass_fixup_onnx_controlflow_node( + n: Node, + opset_version: _int, +) -> List[Value]: ... def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ... - def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ... - def _generate_upgraders_graph() -> Dict[str, Graph]: ... - def _calculate_package_version_based_on_upgraders(val: _bool): ... - def _get_version_calculator_flag() -> _bool: ... - -def _jit_script_interface_compile(name: str, class_def: ClassDef, rcb: ResolutionCallback, is_module: _bool): ... +def _jit_script_interface_compile( + name: str, + class_def: ClassDef, + rcb: ResolutionCallback, + is_module: _bool, +): ... def _jit_script_compile_overload( qualname: str, overload_decl: Decl, implementation_def: Def, rcb: ResolutionCallback, implementation_defaults: Dict[str, Any], - signature: Any + signature: Any, ): ... def _jit_script_compile( qual_name: str, definition: Def, rcb: ResolutionCallback, - defaults: Dict[str, Any] + defaults: Dict[str, Any], ): ... def _jit_script_class_compile( qual_name: str, definition: ClassDef, defaults: Dict[str, Dict[str, Any]], - rcb: ResolutionCallback + rcb: ResolutionCallback, ): ... def _parse_source_def(src: str) -> Def: ... def import_ir_module( cu: CompilationUnit, filename: Union[str, Path], map_location: Union[_device, str, None], - extra_files: Dict[str, Any] + extra_files: Dict[str, Any], ) -> ScriptModule: ... def import_ir_module_from_buffer( cu: CompilationUnit, buffer: BinaryIO, map_location: Union[_device, str, None], - extra_files: Dict[str, Any] + extra_files: Dict[str, Any], ) -> ScriptModule: ... def _import_ir_module_from_package( cu: CompilationUnit, reader: PyTorchFileReader, storage_context: DeserializationStorageContext, map_location: Union[_device, str, None], - ts_id: str + ts_id: str, ) -> ScriptModule: ... - def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ... def _check_onnx_proto(proto: str) -> None: ... def _propagate_and_assign_input_shapes( @@ -486,17 +630,15 @@ def _propagate_and_assign_input_shapes( inputs: Tuple[Tensor, ...], param_count_list: List[_int], with_grad: _bool, - propagate: _bool + propagate: _bool, ) -> Graph: ... # Defined in torch/csrc/jit/runtime/graph_executor.h -class GraphExecutorState: - ... +class GraphExecutorState: ... 
# Defined in torch/torch/csrc/jit/ir/alias_analysis.h class AliasDb: def __str__(self) -> str: ... - ... class _InsertPoint: def __enter__(self) -> None: ... @@ -509,11 +651,10 @@ class Use: @property def offset(self) -> _int: ... def isAfter(self, other: Use) -> _bool: ... - ... # Defined in torch/csrc/jit/ir/ir.h class Value: - def type(self)-> JitType: ... + def type(self) -> JitType: ... def setType(self, t: JitType) -> Value: ... def setTypeAs(self, other: Value) -> Value: ... def inferTypeFrom(self, t: Tensor) -> None: ... @@ -530,7 +671,6 @@ class Value: def copyMetadata(self, other: Value) -> Value: ... def isCompleteTensor(self) -> _bool: ... def toIValue(self) -> IValue: ... - ... # Defined in torch/csrc/jit/ir/ir.h class Block: @@ -542,7 +682,6 @@ class Block: def owningNode(self) -> Node: ... def registerOutput(self, n: Value) -> _int: ... def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ... - ... # Defined in torch/csrc/jit/ir/ir.h class Node: @@ -625,7 +764,6 @@ class Node: def ty_(self, name: str, val: JitType) -> Node: ... def tys(self, name: str) -> List[JitType]: ... def tys_(self, name: str, val: List[JitType]) -> Node: ... - ... # Defined in torch/torch/csrc/jit/ir/ir.h class Graph: @@ -651,8 +789,6 @@ class Graph: def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ... def makeMultiOutputIntoTuple(self) -> None: ... def copy(self) -> Graph: ... - ... - # Defined in torch/aten/src/ATen/core/alias_info.h class AliasInfo: @@ -660,42 +796,41 @@ class AliasInfo: before_set: Set[str] after_set: Set[str] - # Defined in torch/aten/src/ATen/core/function_schema.h class Argument: name: str type: JitType default_value: Optional[Any] def has_default_value(self) -> _bool: ... - kwarg_only : _bool + kwarg_only: _bool is_out: _bool alias_info: Optional[AliasInfo] - ... + class FunctionSchema: arguments: List[Argument] returns: List[Argument] name: str overload_name: str - ... class _UpgraderEntry: bumped_at_version: _int upgrader_name: str old_schema: str - def __init__(self, bumped_at_version: _int, upgrader_name: str, old_schema: str) -> None: ... + def __init__( + self, + bumped_at_version: _int, + upgrader_name: str, + old_schema: str, + ) -> None: ... class _UpgraderRange: min_version: _int max_version: _int def _get_max_operator_version() -> _int: ... - def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ... - def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ... - def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ... - def _test_only_remove_entry_to_op_version(op_name: str) -> None: ... # Defined in torch/csrc/jit/python/script_init.cpp @@ -704,14 +839,12 @@ class ScriptModuleSerializer: def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ... def write_files(self) -> None: ... def storage_context(self) -> SerializationStorageContext: ... - ... # Defined in torch/csrc/jit/python/script_init.cpp class SerializationStorageContext: def __init__(self) -> None: ... def has_storage(self, storage: Storage) -> _bool: ... def get_or_add_storage(self, storage: Storage) -> _int: ... - ... # Defined in torch/csrc/jit/python/script_init.cpp class DeserializationStorageContext: @@ -719,7 +852,6 @@ class DeserializationStorageContext: def get_storage(self, name: str, dtype: _dtype) -> Tensor: ... def has_storage(self, name: str) -> _bool: ... def add_storage(self, name: str, tensor: Tensor) -> _int: ... - ... 
# Defined in torch/csrc/jit/python/script_init.cpp class ConcreteModuleTypeBuilder: @@ -728,13 +860,24 @@ class ConcreteModuleTypeBuilder: def set_module_list(self): ... def set_parameter_list(self): ... def set_parameter_dict(self): ... - def add_attribute(self, name: str, ty: JitType, is_param: _bool, is_buffer: _bool): ... + def add_attribute( + self, + name: str, + ty: JitType, + is_param: _bool, + is_buffer: _bool, + ): ... def add_module(self, name: str, meta: ConcreteModuleType): ... def add_constant(self, name: str, value: Any): ... def add_overload(self, method_name: str, overloaded_method_names: List[str]): ... def add_builtin_function(self, name: str, symbol_name: str): ... def add_failed_attribute(self, name: str, failure_reason: str): ... - def add_function_attribute(self, name: str, ty: JitType, func: Callable[..., Any]): ... + def add_function_attribute( + self, + name: str, + ty: JitType, + func: Callable[..., Any], + ): ... def add_ignored_attribute(self, name: str): ... def add_ignored_attributes(self, names: List[str]): ... def add_forward_hook(self, hook: Callable[..., Any]): ... @@ -742,8 +885,7 @@ class ConcreteModuleTypeBuilder: class ConcreteModuleType: def get_constants(self) -> Dict[str, Any]: ... - def equals(self, other: 'ConcreteModuleType') -> _bool: ... - + def equals(self, other: ConcreteModuleType) -> _bool: ... @staticmethod def from_jit_type(ty: JitType) -> ConcreteModuleType: ... @@ -753,18 +895,27 @@ class CallStack: class ErrorReport: def __init__(self, range: SourceRange) -> None: ... def what(self) -> str: ... - @staticmethod def call_stack() -> str: ... class CompilationUnit: - def __init__(self, lang: str=..., _frames_up: _int=...) -> None: ... + def __init__(self, lang: str = ..., _frames_up: _int = ...) -> None: ... def find_function(self, name: str) -> ScriptFunction: ... def __getattr__(self, name: str) -> ScriptFunction: ... - def define(self, script: str, rcb: ResolutionCallback=..., _frames_up: _int=...): ... + def define( + self, + script: str, + rcb: ResolutionCallback = ..., + _frames_up: _int = ..., + ): ... def get_interface(self, name: str) -> InterfaceType: ... def get_functions(self) -> List[ScriptFunction]: ... - def create_function(self, name: str, graph: Graph, shouldMangle: _bool=...) -> ScriptFunction: ... + def create_function( + self, + name: str, + graph: Graph, + shouldMangle: _bool = ..., + ) -> ScriptFunction: ... def get_class(self, name: str) -> ClassType: ... class ScriptObject: @@ -811,8 +962,7 @@ class BufferDict: def __init__(self, mod: ScriptModule) -> None: ... # Defined in torch/csrc/jit/api/module.h -class Module: - ... +class Module: ... # Defined in torch/csrc/Module.cpp def _initExtension(shm_manager_path: str) -> None: ... # THPModule_initExtension @@ -829,20 +979,28 @@ def _crash_if_aten_asan() -> _int: ... # THPModule_crashIfATenASAN def _show_config() -> str: ... # THPModule_showConfig def _cxx_flags() -> str: ... # THPModule_cxxFlags def _parallel_info() -> str: ... # THPModule_parallelInfo -def _set_backcompat_broadcast_warn(arg: _bool) -> None: ... # THPModule_setBackcompatBroadcastWarn +def _set_backcompat_broadcast_warn( + arg: _bool, +) -> None: ... # THPModule_setBackcompatBroadcastWarn def _get_backcompat_broadcast_warn() -> _bool: ... # THPModule_getBackcompatBroadcastWarn -def _set_backcompat_keepdim_warn(arg: _bool) -> None: ... # THPModule_setBackcompatKeepdimWarn +def _set_backcompat_keepdim_warn( + arg: _bool, +) -> None: ... 
# THPModule_setBackcompatKeepdimWarn def _get_backcompat_keepdim_warn() -> _bool: ... # THPModule_getBackcompatKeepdimWarn def get_num_thread() -> _int: ... # THPModule_getNumThreads def set_num_threads(nthreads: _int) -> None: ... # THPModule_setNumThreads def get_num_interop_threads() -> _int: ... # THPModule_getNumInteropThreads -def set_num_interop_threads(nthreads: _int) -> None: ... # THPModule_setNumInteropThreads +def set_num_interop_threads( + nthreads: _int, +) -> None: ... # THPModule_setNumInteropThreads def _get_cudnn_enabled() -> _bool: ... # THPModule_userEnabledCuDNN def _set_cudnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledCuDNN def _get_flash_sdp_enabled() -> _bool: ... # THPModule_userEnabledFusedSDP def _set_sdp_use_flash(arg: _bool) -> None: ... # THPModule_setSDPUseFlash def _get_mem_efficient_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP -def _set_sdp_use_mem_efficient(arg: _bool) -> None: ... # THPModule_setSDPUseMemEfficient +def _set_sdp_use_mem_efficient( + arg: _bool, +) -> None: ... # THPModule_setSDPUseMemEfficient def _get_math_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP def _set_sdp_use_math(arg: _bool) -> None: ... # THPModule_setSDPUseMath def _get_mkldnn_enabled() -> _bool: ... # THPModule_userEnabledMkldnn @@ -853,32 +1011,50 @@ def _get_cudnn_deterministic() -> _bool: ... # THPModule_deterministicCuDNN def _set_cudnn_deterministic(arg: _bool) -> None: ... # THPModule_setDeterministicCuDNN def _get_deterministic_algorithms() -> _bool: ... # THPModule_deterministicAlgorithms def _get_deterministic_algorithms_warn_only() -> _bool: ... # THPModule_deterministicAlgorithmsWarnOnly -def _set_deterministic_algorithms(mode: _bool, *, warn_only: _bool=...) -> None: ... # THPModule_setDeterministicAlgorithms +def _set_deterministic_algorithms( + mode: _bool, + *, + warn_only: _bool = ..., +) -> None: ... # THPModule_setDeterministicAlgorithms def _get_warnAlways() -> _bool: ... # THPModule_warnAlways def _set_warnAlways(arg: _bool) -> None: ... # THPModule_setWarnAlways def _get_cudnn_allow_tf32() -> _bool: ... # THPModule_allowTF32CuDNN def _set_cudnn_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuDNN def _get_cublas_allow_tf32() -> _bool: ... # THPModule_allowTF32CuBLAS def _set_cublas_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuBLAS -def _get_float32_matmul_precision() -> str: ... #THPModule_float32MatmulPrecision -def _set_float32_matmul_precision(arg: str) -> None: ... #THPModule_setFloat32MatmulPrecision -def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... #THPModule_allowFP16ReductionCuBLAS -def _set_cublas_allow_fp16_reduced_precision_reduction(arg: _bool) -> None: ... #THPModule_setAllowFP16ReductionCuBLAS -def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... #THPModule_allowBF16ReductionCuBLAS -def _set_cublas_allow_bf16_reduced_precision_reduction(arg: _bool) -> None: ... #THPModule_setAllowBF16ReductionCuBLAS +def _get_float32_matmul_precision() -> str: ... # THPModule_float32MatmulPrecision +def _set_float32_matmul_precision( + arg: str, +) -> None: ... # THPModule_setFloat32MatmulPrecision +def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... # THPModule_allowFP16ReductionCuBLAS +def _set_cublas_allow_fp16_reduced_precision_reduction( + arg: _bool, +) -> None: ... # THPModule_setAllowFP16ReductionCuBLAS +def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... 
# THPModule_allowBF16ReductionCuBLAS +def _set_cublas_allow_bf16_reduced_precision_reduction( + arg: _bool, +) -> None: ... # THPModule_setAllowBF16ReductionCuBLAS def _set_conj(x: Tensor, conj: _bool) -> None: ... def _set_neg(x: Tensor, neg: _bool) -> None: ... def _set_meta_in_tls_dispatch_include(meta_in_tls: _bool) -> None: ... def _meta_in_tls_dispatch_include() -> _bool: ... def _select_conv_backend(*args, **kwargs) -> ConvBackend: ... -def _conv_determine_backend_memory_format(input: Tensor, weight: Tensor, backend: ConvBackend) -> memory_format: ... +def _conv_determine_backend_memory_format( + input: Tensor, + weight: Tensor, + backend: ConvBackend, +) -> memory_format: ... def _has_storage(x: Tensor) -> _bool: ... def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ... + # NB: There is no Capsule type in typing, see # https://code.activestate.com/lists/python-dev/139675/ def _to_dlpack(data: Tensor) -> Any: ... # THPModule_toDLPack def _from_dlpack(data: Any) -> Tensor: ... # THPModule_fromDLPack -def _get_cpp_backtrace(frames_to_skip: _int, maximum_number_of_frames: _int) -> str: ... # THPModule_getCppBacktrace +def _get_cpp_backtrace( + frames_to_skip: _int, + maximum_number_of_frames: _int, +) -> str: ... # THPModule_getCppBacktrace def set_flush_denormal(arg: _bool) -> _bool: ... # THPModule_setFlushDenormal def get_default_dtype() -> _dtype: ... # THPModule_getDefaultDtype def _get_default_device() -> str: ... # THPModule_getDefaultDevice @@ -887,28 +1063,44 @@ def _set_qengine(qegine: _int) -> None: ... # THPModule_setQEngine def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines def _is_xnnpack_enabled() -> _bool: ... # THPModule_isEnabledXNNPACK def _check_sparse_tensor_invariants() -> _bool: ... # THPModule_checkSparseTensorInvariants -def _set_check_sparse_tensor_invariants(arg: _bool) -> None: ... # THPModule_setCheckSparseTensorInvariants +def _set_check_sparse_tensor_invariants( + arg: _bool, +) -> None: ... # THPModule_setCheckSparseTensorInvariants def _set_default_mobile_cpu_allocator() -> None: ... # THPModule_setDefaultMobileCPUAllocator def _unset_default_mobile_cpu_allocator() -> None: ... # THPModule_unsetDefaultMobileCPUAllocator def _is_torch_function_enabled() -> _bool: ... # THPModule_isEnabledTorchFunction -def _has_torch_function(args: Iterable[Any]) -> _bool: ... # THPModule_has_torch_function +def _has_torch_function( + args: Iterable[Any], +) -> _bool: ... # THPModule_has_torch_function def _has_torch_function_unary(Any) -> _bool: ... # THPModule_has_torch_function_unary -def _has_torch_function_variadic(*args: Any) -> _bool: ... # THPModule_has_torch_function_variadic +def _has_torch_function_variadic( + *args: Any, +) -> _bool: ... # THPModule_has_torch_function_variadic def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython def _demangle(str) -> str: ... # c10::demangle -def _disabled_torch_function_impl(func: Callable, types: Iterable[Type], args: Tuple, kwargs: Dict) -> Any: ... # THPModule_disable_torch_function -def _disabled_torch_dispatch_impl(func: Callable, types: Iterable[Type], args: Tuple, kwargs: Dict) -> Any: ... # THPModule_disable_dispatch_function +def _disabled_torch_function_impl( + func: Callable, + types: Iterable[Type], + args: Tuple, + kwargs: Dict, +) -> Any: ... 
# THPModule_disable_torch_function +def _disabled_torch_dispatch_impl( + func: Callable, + types: Iterable[Type], + args: Tuple, + kwargs: Dict, +) -> Any: ... # THPModule_disable_dispatch_function def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ... def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ... + class _LinalgBackend: Default: _LinalgBackend Cusolver: _LinalgBackend Magma: _LinalgBackend -class ConvBackend(Enum): - ... +class ConvBackend(Enum): ... # Defined in `valgrind.h` and `callgrind.h` respecitively. def _valgrind_supported_platform() -> _bool: ... # NVALGRIND @@ -956,14 +1148,12 @@ def __set_forward_AD_enabled(enabled: _bool) -> None: ... def __is_forward_AD_enabled() -> _bool: ... def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ... def _reset_default_hooks() -> None: ... - -def _is_torch_function_mode_enabled()-> _bool: ... +def _is_torch_function_mode_enabled() -> _bool: ... def _set_torch_function_mode(cls: Any) -> None: ... def _push_on_torch_function_stack(cls: Any) -> None: ... def _pop_torch_function_stack() -> Any: ... def _get_function_stack_at(idx: _int) -> Any: ... def _len_torch_function_stack() -> _int: ... - def _set_torch_dispatch_mode(cls: Any) -> None: ... def _push_on_torch_dispatch_stack(cls: Any) -> None: ... def _pop_torch_dispatch_stack() -> Any: ... @@ -986,14 +1176,9 @@ class _ViewReplayEnabled: def __init__(self, mode: _bool) -> None: ... # Defined in torch/csrc/jit/python/script_init.cpp -class LoggerBase: - ... - -class NoopLogger(LoggerBase): - ... - -class LockingLogger(LoggerBase): - ... +class LoggerBase: ... +class NoopLogger(LoggerBase): ... +class LockingLogger(LoggerBase): ... class AggregationType(Enum): SUM = 0 @@ -1001,14 +1186,18 @@ class AggregationType(Enum): class FileCheck: def run(self, test_string: str) -> None: ... - def check(self, test_string: str) -> 'FileCheck': ... - def check_not(self, test_string: str) -> 'FileCheck': ... - def check_same(self, test_string: str) -> 'FileCheck': ... - def check_next(self, test_string: str) -> 'FileCheck': ... - def check_count(self, test_string: str, count: _int, exactly: _bool = False) -> 'FileCheck': ... - def check_dag(self, test_string: str) -> 'FileCheck': ... - def check_source_highlighted(self, test_string: str) -> 'FileCheck': ... - ... + def check(self, test_string: str) -> FileCheck: ... + def check_not(self, test_string: str) -> FileCheck: ... + def check_same(self, test_string: str) -> FileCheck: ... + def check_next(self, test_string: str) -> FileCheck: ... + def check_count( + self, + test_string: str, + count: _int, + exactly: _bool = False, + ) -> FileCheck: ... + def check_dag(self, test_string: str) -> FileCheck: ... + def check_source_highlighted(self, test_string: str) -> FileCheck: ... # Defined in torch/csrc/jit/python/init.cpp class PyTorchFileReader: @@ -1017,7 +1206,6 @@ class PyTorchFileReader: @overload def __init__(self, buffer: BinaryIO) -> None: ... def get_record(self, name: str) -> bytes: ... - ... class PyTorchFileWriter: @overload @@ -1029,7 +1217,6 @@ class PyTorchFileWriter: def set_min_version(self, version: _int) -> None: ... def get_all_written_records(self) -> List[str]: ... def archive_name(self) -> str: ... - ... def _jit_get_inline_everything_mode() -> _bool: ... def _jit_set_inline_everything_mode(enabled: _bool) -> None: ... @@ -1056,7 +1243,6 @@ class Generator: def seed(self) -> _int: ... def initial_seed(self) -> _int: ... 
- # Defined in torch/csrc/utils/python_dispatch.cpp class _DispatchOperatorHandle: @@ -1065,27 +1251,64 @@ class _DispatchOperatorHandle: class _DispatchModule: def def_(self, schema: str, alias: str = "") -> _DispatchModule: ... def def_legacy(self, schema: str) -> _DispatchModule: ... - def def_name_t_t(self, name: str, dispatch: str, debug: str = "default_def_name_t_t") -> _DispatchModule: ... - def def_schema_t_t(self, schema: str, dispatch: str, alias: str, debug: str = "default_def_schema_t_t") -> _DispatchModule: ... - def impl_t_t(self, name: str, dispatch: str, debug: str = "impl_t_t") -> _DispatchModule: ... + def def_name_t_t( + self, + name: str, + dispatch: str, + debug: str = "default_def_name_t_t", + ) -> _DispatchModule: ... + def def_schema_t_t( + self, + schema: str, + dispatch: str, + alias: str, + debug: str = "default_def_schema_t_t", + ) -> _DispatchModule: ... + def impl_t_t( + self, + name: str, + dispatch: str, + debug: str = "impl_t_t", + ) -> _DispatchModule: ... def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ... def define(self, schema: str, alias: str = "") -> _DispatchModule: ... def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ... -def _dispatch_library(kind: str, name: str, dispatch: str, file: str = "", linenum: Any = 0) -> _DispatchModule: ... +def _dispatch_library( + kind: str, + name: str, + dispatch: str, + file: str = "", + linenum: Any = 0, +) -> _DispatchModule: ... def _dispatch_dump(name: str) -> str: ... def _dispatch_dump_table(name: str) -> str: ... def _dispatch_check_invariants(name: str) -> None: ... def _dispatch_check_all_invariants() -> None: ... def _dispatch_has_kernel(name: str) -> _bool: ... -def _dispatch_has_kernel_for_dispatch_key(name: str, dispatch: _dispatchkey) -> _bool: ... -def _dispatch_has_kernel_for_any_dispatch_key(name: str, dispatch_key_set: DispatchKeySet) -> _bool: ... -def _dispatch_has_computed_kernel_for_dispatch_key(name: str, dispatch: _dispatchkey) -> _bool: ... +def _dispatch_has_kernel_for_dispatch_key( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... +def _dispatch_has_kernel_for_any_dispatch_key( + name: str, + dispatch_key_set: DispatchKeySet, +) -> _bool: ... +def _dispatch_has_computed_kernel_for_dispatch_key( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... def _dispatch_find_dangling_impls() -> List[str]: ... def _dispatch_get_all_op_names() -> List[str]: ... -def _dispatch_tls_set_dispatch_key_excluded(dispatch: _dispatchkey, val: _bool) -> None: ... +def _dispatch_tls_set_dispatch_key_excluded( + dispatch: _dispatchkey, + val: _bool, +) -> None: ... def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ... -def _dispatch_tls_set_dispatch_key_included(dispatch: _dispatchkey, val: _bool) -> None: ... +def _dispatch_tls_set_dispatch_key_included( + dispatch: _dispatchkey, + val: _bool, +) -> None: ... def _dispatch_tls_is_dispatch_key_included(dispatch: _dispatchkey) -> _bool: ... def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ... def _dispatch_key_name(dispatch: _dispatchkey) -> str: ... @@ -1105,36 +1328,36 @@ class DispatchKeySet: def __repr__(self) -> str: ... _dispatch_autogradother_backends: DispatchKeySet + def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ... def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ... def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ... 
-def _dispatch_get_backend_keyset_from_autograd(dispatch: _dispatchkey) -> DispatchKeySet: ... +def _dispatch_get_backend_keyset_from_autograd( + dispatch: _dispatchkey, +) -> DispatchKeySet: ... def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ... def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ... def _dispatch_tls_local_include_set() -> DispatchKeySet: ... -def _dispatch_is_included_in_alias(dispatch_a: _dispatchkey, dispatch_b: _dispatchkey) -> _bool: ... +def _dispatch_is_included_in_alias( + dispatch_a: _dispatchkey, + dispatch_b: _dispatchkey, +) -> _bool: ... -class ExcludeDispatchKeyGuard: - pass - -class _AutoDispatchBelowAutograd: - pass +class ExcludeDispatchKeyGuard: ... +class _AutoDispatchBelowAutograd: ... def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ... -def _dispatch_get_registrations_for_dispatch_key(dispatch_key: str = "") -> List[str]: ... - +def _dispatch_get_registrations_for_dispatch_key( + dispatch_key: str = "", +) -> List[str]: ... def _are_functorch_transforms_active() -> _bool: ... # Define in torch/csrc/autograd/init.cpp -class _DisablePythonDispatcher: - pass - -class _EnablePythonDispatcher: - pass +class _DisablePythonDispatcher: ... +class _EnablePythonDispatcher: ... def _set_python_dispatcher(dispatcher: object) -> None: ... - # Defined in torch/csrc/utils/init.cpp class BenchmarkConfig: num_calling_threads: _int @@ -1166,8 +1389,7 @@ class _ImperativeEngine: def is_checkpoint_valid(self) -> _bool: ... # Defined in torch/csrc/autograd/python_variable.cpp -class _TensorMeta(type): - pass +class _TensorMeta(type): ... # Defined in torch/csrc/autograd/python_variable.cpp class _TensorBase(metaclass=_TensorMeta): @@ -1238,11 +1460,16 @@ def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ... def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ... def _cuda_resetPeakMemoryStats(device: _int) -> None: ... def _cuda_memorySnapshot() -> Dict[str, Any]: ... -def _cuda_recordMemoryHistory(enabled: _bool, record_context: _bool, record_context_cpp: _bool, alloc_trace_max_entries: _int, alloc_trace_record_context: _bool) -> None: ... +def _cuda_recordMemoryHistory( + enabled: _bool, + record_context: _bool, + record_context_cpp: _bool, + alloc_trace_max_entries: _int, + alloc_trace_record_context: _bool, +) -> None: ... def _cuda_getAllocatorBackend() -> str: ... -class _cuda_CUDAAllocator: - ... +class _cuda_CUDAAllocator: ... def _cuda_customAllocator(alloc_fn: _int, free_fn: _int) -> _cuda_CUDAAllocator: ... def _cuda_changeCurrentAllocator(allocator: _cuda_CUDAAllocator) -> None: ... @@ -1250,44 +1477,55 @@ def _cuda_getAllocator() -> _cuda_CUDAAllocator: ... def _cuda_lock_mutex() -> None: ... def _cuda_unlock_mutex() -> None: ... def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ... -def _cuda_jiterator_compile_and_launch_kernel(code_string: str, - kernel_name: str, - return_by_ref: _bool, - num_outputs: _int, - tensors: Tuple, - kwargs: Dict[str, Union[_int, _float, _bool]]) -> Tensor: ... +def _cuda_jiterator_compile_and_launch_kernel( + code_string: str, + kernel_name: str, + return_by_ref: _bool, + num_outputs: _int, + tensors: Tuple, + kwargs: Dict[str, Union[_int, _float, _bool]], +) -> Tensor: ... def _cuda_get_cudnn_benchmark_limit() -> _int: ... def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ... def _nccl_version() -> _int: ... def _nccl_unique_id() -> bytes: ... def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ... 
-def _nccl_reduce(input: Sequence[Tensor], - output: Tensor, - root: _int, - op: _int, - streams: Optional[Sequence[_CudaStreamBase]], - comms: Optional[Sequence[object]]) -> None: ... -def _nccl_all_reduce(input: Sequence[Tensor], - output: Sequence[Tensor], - op: _int, - streams: Optional[Sequence[_CudaStreamBase]], - comms: Optional[Sequence[object]]) -> None: ... -def _nccl_broadcast(input: Sequence[Tensor], - root: _int, - streams: Optional[Sequence[_CudaStreamBase]], - comms: Optional[Sequence[object]]) -> None: ... -def _nccl_all_gather(input: Sequence[Tensor], - output: Sequence[Tensor], - streams: Optional[Sequence[_CudaStreamBase]], - comms: Optional[Sequence[object]]) -> None: ... -def _nccl_reduce_scatter(input: Sequence[Tensor], - output: Sequence[Tensor], - op: _int, - streams: Optional[Sequence[_CudaStreamBase]], - comms: Optional[Sequence[object]]) -> None: ... +def _nccl_reduce( + input: Sequence[Tensor], + output: Tensor, + root: _int, + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_all_reduce( + input: Sequence[Tensor], + output: Sequence[Tensor], + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_broadcast( + input: Sequence[Tensor], + root: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_all_gather( + input: Sequence[Tensor], + output: Sequence[Tensor], + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_reduce_scatter( + input: Sequence[Tensor], + output: Sequence[Tensor], + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... def _rocm_is_backward_pass() -> _bool: ... - class _CudaDeviceProperties: name: str major: _int @@ -1303,12 +1541,26 @@ def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: . def _broadcast_coalesced( tensors: List[Tensor], devices: List[_int], - buffer_size: _int + buffer_size: _int, ) -> List[List[Tensor]]: ... - -def _scatter(tensor: Tensor, devices: List[_int], chunk_sizes: Optional[List[_int]], dim: _int, streams: Optional[List[Stream]]) -> List[Tensor]: ... -def _scatter_out(tensor: Tensor, out_tensors: List[Tensor], dim: _int, streams: Optional[List[Stream]]) -> List[Tensor]: ... -def _gather(tensors: List[Tensor], dim: _int, destination_index: Optional[_int]) -> Tensor: ... +def _scatter( + tensor: Tensor, + devices: List[_int], + chunk_sizes: Optional[List[_int]], + dim: _int, + streams: Optional[List[Stream]], +) -> List[Tensor]: ... +def _scatter_out( + tensor: Tensor, + out_tensors: List[Tensor], + dim: _int, + streams: Optional[List[Stream]], +) -> List[Tensor]: ... +def _gather( + tensors: List[Tensor], + dim: _int, + destination_index: Optional[_int], +) -> Tensor: ... def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ... # Defined in torch/csrc/cuda/Stream.cpp @@ -1321,7 +1573,13 @@ class _CudaStreamBase: cuda_stream: _int priority: _int - def __new__(self, priority: _int = 0, stream_id: _int = 0, device_index: _int = 0, stream_ptr: _int = 0) -> _CudaStreamBase: ... + def __new__( + self, + priority: _int = 0, + stream_id: _int = 0, + device_index: _int = 0, + stream_ptr: _int = 0, + ) -> _CudaStreamBase: ... def query(self) -> _bool: ... def synchronize(self) -> None: ... def priority_range(self) -> Tuple[_int, _int]: ... 
@@ -1331,7 +1589,12 @@ class _CudaEventBase: device: _device cuda_event: _int - def __new__(cls, enable_timing: _bool = False, blocking: _bool = False, interprocess: _bool = False) -> _CudaEventBase: ... + def __new__( + cls, + enable_timing: _bool = False, + blocking: _bool = False, + interprocess: _bool = False, + ) -> _CudaEventBase: ... @classmethod def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ... def record(self, stream: _CudaStreamBase) -> None: ... @@ -1343,23 +1606,25 @@ class _CudaEventBase: # Defined in torch/csrc/cuda/Graph.cpp class _CUDAGraph: - def capture_begin(self, - pool: Optional[Tuple[_int, _int]]=...) -> None: ... + def capture_begin(self, pool: Optional[Tuple[_int, _int]] = ...) -> None: ... def capture_end(self) -> None: ... def replay(self) -> None: ... def reset(self) -> None: ... def pool(self) -> Tuple[_int, _int]: ... def enable_debug_mode(self) -> None: ... - def debug_dump(self, - debug_path: str) -> None: ... + def debug_dump(self, debug_path: str) -> None: ... def _cuda_isCurrentStreamCapturing() -> _bool: ... - def _graph_pool_handle() -> Tuple[_int, _int]: ... # Defined in torch/csrc/DataLoader.cpp -def _set_worker_signal_handlers(*arg: Any) -> None: ... # THPModule_setWorkerSignalHandlers -def _set_worker_pids(key: _int, child_pids: Tuple[_int, ...]) -> None: ... # THPModule_setWorkerPIDs +def _set_worker_signal_handlers( + *arg: Any, +) -> None: ... # THPModule_setWorkerSignalHandlers +def _set_worker_pids( + key: _int, + child_pids: Tuple[_int, ...], +) -> None: ... # THPModule_setWorkerPIDs def _remove_worker_pids(loader_id: _int) -> None: ... # THPModule_removeWorkerPIDs def _error_if_any_worker_fails() -> None: ... # THPModule_errorIfAnyWorkerFails @@ -1370,7 +1635,6 @@ class TracingState: def current_scope(self) -> str: ... def set_graph(self, graph: Graph) -> None: ... def graph(self) -> Graph: ... - ... def _create_graph_by_tracing( func: Callable[..., Any], @@ -1379,19 +1643,19 @@ def _create_graph_by_tracing( strict: Any, force_outplace: Any, self: Any = None, - argument_names: List[str] = [] + argument_names: List[str] = [], ) -> Tuple[Graph, Stack]: ... def _tracer_warn_use_python(): ... def _get_tracing_state() -> TracingState: ... # Defined in torch/csrc/jit/python/python_ir.cpp # Not actually defined in python_ir.cpp, not sure where they are. -class IValue: - ... +class IValue: ... + Stack = List[IValue] class JitType: - annotation_str : str + annotation_str: str def isSubtypeOf(self, other: JitType) -> _bool: ... def with_dtype(self, dtype: _dtype) -> JitType: ... def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ... @@ -1406,7 +1670,7 @@ class InferredType: def success(self) -> _bool: ... def reason(self) -> str: ... -R = TypeVar('R', bound=JitType) +R = TypeVar("R", bound=JitType) class AnyType(JitType): @staticmethod @@ -1451,7 +1715,6 @@ class StreamObjType(JitType): class ListType(JitType): def __init__(self, a: JitType) -> None: ... def getElementType(self) -> JitType: ... - @staticmethod def ofInts() -> ListType: ... @staticmethod @@ -1488,7 +1751,6 @@ class InterfaceType(JitType): class OptionalType(JitType, Generic[R]): def __init__(self, a: JitType) -> None: ... def getElementType(self) -> JitType: ... - @staticmethod def ofTensor() -> OptionalType: ... @@ -1508,9 +1770,8 @@ class EnumType(JitType): self, qualified_name: str, value_type: JitType, - enum_names_values: List[Any] - ) -> None: - ... + enum_names_values: List[Any], + ) -> None: ... 
class TensorType(JitType): @classmethod @@ -1528,24 +1789,19 @@ class TensorType(JitType): def create_from_tensor(t: Tensor) -> TensorType: ... # Defined in torch/csrc/jit/python/python_tree_views.cpp -class SourceRange: - ... - -class TreeView: - ... +class SourceRange: ... +class TreeView: ... class Ident(TreeView): @property def name(self) -> str: ... -class ClassDef(TreeView): - ... +class ClassDef(TreeView): ... class Def(TreeView): def name(self) -> Ident: ... -class Decl(TreeView): - ... +class Decl(TreeView): ... # Defined in torch/csrc/distributed/rpc/init.cpp def _rpc_init() -> _bool: ... @@ -1558,7 +1814,6 @@ def _c10d_init() -> _bool: ... # Defined in torch/csrc/distributed/rpc/testing/init.cpp def _faulty_agent_init() -> _bool: ... - def _enable_minidumps(directory: str) -> None: ... def _disable_minidumps() -> None: ... def _enable_minidumps_on_exceptions() -> None: ... @@ -1567,10 +1822,7 @@ def _activate_cuda_trace() -> None: ... # Defined in torch/csrc/Module.cpp def _current_graph_task_id() -> _int: ... -def _current_autograd_node() -> _Node: ... +def _current_autograd_node() -> _Node: ... -class _OutOfMemoryError: - pass - -class _DistBackendError(RuntimeError): - pass +class _OutOfMemoryError: ... +class _DistBackendError(RuntimeError): ... diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index 391095e3b3bc..35fdcde536b5 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -1,8 +1,14 @@ -from typing import List, Set, Callable, Any, Union, Optional from enum import Enum +from typing import Any, Callable, List, Optional, Set import torch -from ._profiler import _ProfilerEvent, ActiveProfilerType, ProfilerActivity, ProfilerConfig + +from ._profiler import ( + _ProfilerEvent, + ActiveProfilerType, + ProfilerActivity, + ProfilerConfig, +) # Defined in tools/autograd/init.cpp @@ -22,7 +28,6 @@ class DeviceType(Enum): Meta = ... Vulkan = ... Metal = ... - ... class ProfilerEvent: def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ... @@ -41,7 +46,6 @@ class ProfilerEvent: def thread_id(self) -> int: ... def flops(self) -> float: ... def is_async(self) -> bool: ... - ... class _KinetoEvent: def name(self) -> str: ... @@ -50,7 +54,6 @@ class _KinetoEvent: def duration_us(self) -> int: ... def is_async(self) -> bool: ... def linked_correlation_id(self) -> int: ... - ... class _ProfilerResult: def events(self) -> List[_KinetoEvent]: ... @@ -58,11 +61,16 @@ class _ProfilerResult: def save(self, path: str) -> None: ... def experimental_event_tree(self) -> List[_ProfilerEvent]: ... -class SavedTensor: - ... +class SavedTensor: ... -def _enable_profiler(config: ProfilerConfig, activities: Set[ProfilerActivity]) -> None: ... -def _prepare_profiler(config: ProfilerConfig, activities: Set[ProfilerActivity]) -> None: ... +def _enable_profiler( + config: ProfilerConfig, + activities: Set[ProfilerActivity], +) -> None: ... +def _prepare_profiler( + config: ProfilerConfig, + activities: Set[ProfilerActivity], +) -> None: ... def _disable_profiler() -> _ProfilerResult: ... def _profiler_enabled() -> bool: ... def _add_metadata_json(key: str, value: str) -> None: ... @@ -73,15 +81,15 @@ def _record_function_with_args_exit(handle: torch.Tensor) -> None: ... def _supported_activities() -> Set[ProfilerActivity]: ... def _enable_record_function(enable: bool) -> None: ... def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ... -def _push_saved_tensors_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ... 
+def _push_saved_tensors_default_hooks( + pack_hook: Callable, + unpack_hook: Callable, +) -> None: ... def _pop_saved_tensors_default_hooks() -> None: ... - def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ... - def _enable_profiler_legacy(config: ProfilerConfig) -> None: ... def _disable_profiler_legacy() -> List[List[ProfilerEvent]]: ... def _profiler_type() -> ActiveProfilerType: ... - def _saved_tensors_hooks_enable() -> None: ... def _saved_tensors_hooks_disable(message: str) -> None: ... def _saved_tensors_hooks_get_disabled_error_message() -> Optional[str]: ... diff --git a/torch/_C/_cudnn.pyi b/torch/_C/_cudnn.pyi index e6c7beb31680..689c984b9d7d 100644 --- a/torch/_C/_cudnn.pyi +++ b/torch/_C/_cudnn.pyi @@ -1,6 +1,6 @@ from enum import Enum -from torch.types import Tuple, Number, _bool +from torch.types import _bool, Tuple # Defined in torch/csrc/cuda/shared/cudnn.cpp is_cuda: _bool diff --git a/torch/_C/_distributed_autograd.pyi b/torch/_C/_distributed_autograd.pyi index 39cbb984c635..f4c91304a1b1 100644 --- a/torch/_C/_distributed_autograd.pyi +++ b/torch/_C/_distributed_autograd.pyi @@ -1,5 +1,6 @@ +from typing import Any, Dict, List, Set + import torch -from typing import Dict, List, Set, Any # This module is defined in torch/csrc/distributed/autograd/init.cpp @@ -20,6 +21,6 @@ def _get_debug_info() -> Dict[str, str]: ... def backward( context_id: int, roots: List[torch.Tensor], - retain_graph = False + retain_graph=False, ) -> None: ... def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ... diff --git a/torch/_C/_distributed_c10d.pyi b/torch/_C/_distributed_c10d.pyi index dea04934c414..95fe5d704cad 100644 --- a/torch/_C/_distributed_c10d.pyi +++ b/torch/_C/_distributed_c10d.pyi @@ -17,7 +17,8 @@ class BuiltinCommHookType(Enum): def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ... def _register_builtin_comm_hook( - reducer: Reducer, comm_hook_type: BuiltinCommHookType + reducer: Reducer, + comm_hook_type: BuiltinCommHookType, ): ... class GradBucket: @@ -50,7 +51,9 @@ class Reducer: def _get_zeros_like_grad_buckets(self) -> List[GradBucket]: ... def _push_all_rebuilt_params(self) -> None: ... def _set_forward_pass_work_handle( - self, work: Work, use_static_world_size: bool + self, + work: Work, + use_static_world_size: bool, ): ... def _get_local_used_map(self) -> Tensor: ... def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ... @@ -90,7 +93,6 @@ class DebugLevel(Enum): DETAIL = ... class ReduceOp: - def __init__(self, op: "RedOpType"): ... SUM = ... @@ -149,7 +151,10 @@ class Store: def get(self, key: str) -> bytes: ... def add(self, key: str, value: int) -> int: ... def compare_set( - self, key: str, expected_value: str, desired_value: str + self, + key: str, + expected_value: str, + desired_value: str, ) -> bytes: ... def delete_key(self, key: str) -> bool: ... def num_keys(self) -> int: ... @@ -195,7 +200,6 @@ class Work: def _source_rank(self) -> int: ... def result(self) -> List[Tensor]: ... def synchronize(self): ... - ... class ProcessGroup: class Options: ... @@ -385,11 +389,9 @@ class ProcessGroupGloo(ProcessGroup): timeout: timedelta, ): ... @staticmethod - def create_device(hostname=str(), interface=str()) -> Device: ... - ... + def create_device(hostname="", interface="") -> Device: ... @staticmethod def create_default_device() -> Device: ... - ... class _ProcessGroupWrapper(ProcessGroup): def __init__(self, pg: ProcessGroup, gloo_pg: ProcessGroupGloo): ... 
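Besides the pure re-wrapping, the hunk above also swaps the call-expression defaults in `ProcessGroupGloo.create_device` (`hostname=str()`, `interface=str()`) for plain literals. Defaults in a stub file carry no runtime meaning, and the usual stub convention is to keep them to simple literals or `...`; a standalone sketch of the two styles (hypothetical names, not the real class):

```python
class _Device: ...  # placeholder standing in for the real Device type

# Old style: a call expression as the default adds nothing for the reader
# or the type checker.
def _create_device_old(hostname=str(), interface=str()) -> _Device: ...

# New style, as in the patch: simple literal defaults.
def _create_device_new(hostname="", interface="") -> _Device: ...
```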
@@ -409,7 +411,6 @@ class ProcessGroupNCCL(ProcessGroup): def _group_start() -> None: ... @staticmethod def _group_end() -> None: ... - ... class ProcessGroupUCC(ProcessGroup): def __init__( diff --git a/torch/_C/_distributed_rpc.pyi b/torch/_C/_distributed_rpc.pyi index 72448910e4bc..761d25711523 100644 --- a/torch/_C/_distributed_rpc.pyi +++ b/torch/_C/_distributed_rpc.pyi @@ -1,8 +1,8 @@ -from typing import Any, Dict, List, Optional, Tuple, Union, overload from datetime import timedelta -import enum +from typing import Any, Dict, List, Optional, overload, Tuple + import torch -from torch.types import Device + from . import Future from ._autograd import ProfilerEvent from ._distributed_c10d import ProcessGroup, Store @@ -59,13 +59,12 @@ class PyRRef: def remote(self, timeout: float = _UNSET_RPC_TIMEOUT) -> Any: ... def _serialize(self) -> Tuple: ... @staticmethod - def _deserialize(tp: Tuple) -> 'PyRRef': ... + def _deserialize(tp: Tuple) -> "PyRRef": ... def _get_type(self) -> Any: ... def _get_future(self) -> Future: ... def _get_profiling_future(self) -> Future: ... def _set_profiling_future(self, profilingFuture: Future): ... def __repr__(self) -> str: ... - ... class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions): num_worker_threads: int @@ -79,8 +78,13 @@ class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions): rpc_timeout: float = _DEFAULT_RPC_TIMEOUT_SEC, init_method: str = _DEFAULT_INIT_METHOD, device_maps: Dict[str, Dict[torch.device, torch.device]] = {}, - devices: List[torch.device] = list()): ... - def _set_device_map(self, to: str, device_map: Dict[torch.device, torch.device]): ... + devices: List[torch.device] = [], + ): ... + def _set_device_map( + self, + to: str, + device_map: Dict[torch.device, torch.device], + ): ... class TensorPipeAgent(RpcAgent): def __init__( @@ -108,7 +112,8 @@ class TensorPipeAgent(RpcAgent): worker_info: WorkerInfo, my_devices: List[torch.device], reverse_device_map: Dict[str, Dict[torch.device, torch.device]], - is_join: bool): ... + is_join: bool, + ): ... def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ... @property def is_static_group(self) -> bool: ... @@ -116,7 +121,7 @@ class TensorPipeAgent(RpcAgent): def store(self) -> Store: ... def _is_current_rpc_agent_set() -> bool: ... -def _get_current_rpc_agent()-> RpcAgent: ... +def _get_current_rpc_agent() -> RpcAgent: ... def _set_and_start_rpc_agent(agent: RpcAgent): ... def _reset_current_rpc_agent(): ... def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ... @@ -128,15 +133,15 @@ def _invoke_rpc_builtin( opName: str, rpcTimeoutSeconds: float, *args: Any, - **kwargs: Any - ): ... + **kwargs: Any, +): ... def _invoke_rpc_python_udf( dst: WorkerInfo, pickledPythonUDF: str, tensors: List[torch.Tensor], rpcTimeoutSeconds: float, - isAsyncExecution: bool - ): ... + isAsyncExecution: bool, +): ... def _invoke_rpc_torchscript( dstWorkerName: str, qualifiedNameStr: str, @@ -144,29 +149,29 @@ def _invoke_rpc_torchscript( kwargsDict: Dict, rpcTimeoutSeconds: float, isAsyncExecution: bool, - ): ... +): ... def _invoke_remote_builtin( dst: WorkerInfo, opName: str, rpcTimeoutSeconds: float, *args: Any, - **kwargs: Any - ): ... + **kwargs: Any, +): ... def _invoke_remote_python_udf( dst: WorkerInfo, pickledPythonUDF: str, tensors: List[torch.Tensor], rpcTimeoutSeconds: float, isAsyncExecution: bool, - ): ... +): ... 
def _invoke_remote_torchscript( dstWorkerName: WorkerInfo, qualifiedNameStr: str, rpcTimeoutSeconds: float, isAsyncExecution: bool, *args: Any, - **kwargs: Any - ): ... + **kwargs: Any, +): ... def get_rpc_timeout() -> float: ... def enable_gil_profiling(flag: bool): ... def _set_rpc_timeout(rpcTimeoutSeconds: float): ... diff --git a/torch/_C/_distributed_rpc_testing.pyi b/torch/_C/_distributed_rpc_testing.pyi index a2e69f566d32..f1124aff5fc3 100644 --- a/torch/_C/_distributed_rpc_testing.pyi +++ b/torch/_C/_distributed_rpc_testing.pyi @@ -1,12 +1,13 @@ +from typing import Dict, List + import torch + from ._distributed_c10d import ProcessGroup, Store from ._distributed_rpc import ( _TensorPipeRpcBackendOptionsBase, TensorPipeAgent, WorkerInfo, ) -from typing import List, Dict, overload -from datetime import timedelta # This module is defined in torch/csrc/distributed/rpc/testing/init.cpp diff --git a/torch/_C/_dynamo/eval_frame.pyi b/torch/_C/_dynamo/eval_frame.pyi index 3428342750cc..462cf1cfa995 100644 --- a/torch/_C/_dynamo/eval_frame.pyi +++ b/torch/_C/_dynamo/eval_frame.pyi @@ -1,5 +1,5 @@ import types -from typing import Union + from torch._dynamo.types import DynamoCallback, DynamoGuardHook def set_eval_frame(callback: DynamoCallback) -> DynamoCallback: ... diff --git a/torch/_C/_functions.pyi b/torch/_C/_functions.pyi index da2eb27b7cbb..151c1077b1a3 100644 --- a/torch/_C/_functions.pyi +++ b/torch/_C/_functions.pyi @@ -1,12 +1,11 @@ -from torch import Tensor from typing import AnyStr, List +from torch import Tensor + class UndefinedGrad: def __init__(self) -> None: ... def __call__(self, *inputs: Tensor) -> List[Tensor]: ... - ... class DelayedError: def __init__(self, msg: AnyStr, num_inputs: int) -> None: ... def __call__(self, inputs: List[Tensor]) -> List[Tensor]: ... - ... diff --git a/torch/_C/_functorch.pyi b/torch/_C/_functorch.pyi index bafa63daa448..2cfd55459d34 100644 --- a/torch/_C/_functorch.pyi +++ b/torch/_C/_functorch.pyi @@ -1,7 +1,8 @@ -from torch import Tensor from enum import Enum from typing import Optional, Tuple +from torch import Tensor + # Defined in torch/csrc/functorch/init.cpp def _set_dynamic_layer_keys_included(included: bool) -> None: ... @@ -18,7 +19,6 @@ def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ... def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ... def current_level() -> int: ... def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ... - def set_single_level_autograd_function_allowed(allowed: bool) -> None: ... def get_single_level_autograd_function_allowed() -> bool: ... diff --git a/torch/_C/_lazy.pyi b/torch/_C/_lazy.pyi index 2a2df34f1320..ceaaedee2102 100644 --- a/torch/_C/_lazy.pyi +++ b/torch/_C/_lazy.pyi @@ -1,4 +1,5 @@ from typing import List + from torch import Tensor # defined in torch/csrc/lazy/python/init.cpp diff --git a/torch/_C/_lazy_ts_backend.pyi b/torch/_C/_lazy_ts_backend.pyi index 91575fe939bf..ce833c5ec2e4 100644 --- a/torch/_C/_lazy_ts_backend.pyi +++ b/torch/_C/_lazy_ts_backend.pyi @@ -1,8 +1,11 @@ -#defined in torch/csrc/lazy/python/init.cpp +# defined in torch/csrc/lazy/python/init.cpp + +from typing import Any, List, Tuple -from typing import List, Tuple, Any from torch import Tensor def _init(): ... -def _get_tensors_ts_device_data_node(tensors: List[Tensor]) -> Tuple[List[int], List[Any]]: ... +def _get_tensors_ts_device_data_node( + tensors: List[Tensor], +) -> Tuple[List[int], List[Any]]: ... 
def _run_cached_graph(hash_str: str, graph_inputs: List[Any]) -> List[Tensor]: ... diff --git a/torch/_C/_monitor.pyi b/torch/_C/_monitor.pyi index 47771f180ac6..9950a9e8c30a 100644 --- a/torch/_C/_monitor.pyi +++ b/torch/_C/_monitor.pyi @@ -1,8 +1,8 @@ # Defined in torch/csrc/monitor/python_init.cpp -from typing import List, Dict, Callable, Union -from enum import Enum import datetime +from enum import Enum +from typing import Callable, Dict, List, Union class Aggregation(Enum): VALUE = ... @@ -16,7 +16,10 @@ class Stat: name: str count: int def __init__( - self, name: str, aggregations: List[Aggregation], window_size: int, + self, + name: str, + aggregations: List[Aggregation], + window_size: int, max_samples: int = -1, ) -> None: ... def add(self, v: float) -> None: ... diff --git a/torch/_C/_nn.pyi.in b/torch/_C/_nn.pyi.in index 1198c43da450..3e86ff3358fb 100644 --- a/torch/_C/_nn.pyi.in +++ b/torch/_C/_nn.pyi.in @@ -1,6 +1,7 @@ -from torch import Tensor, memory_format -from typing import Callable, Optional, List, overload, Tuple -from torch.types import _bool, _dtype, _device +from typing import Callable, List, Optional, overload, Tuple + +from torch import memory_format, Tensor +from torch.types import _bool, _device, _dtype # Defined in tools/autograd/templates/python_nn_functions.cpp @@ -10,27 +11,56 @@ ${dispatched_hints} def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ... # Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp -def mkldnn_reorder_conv2d_weight(self: Tensor, padding: List, stride: List, dilatation: List, groups: int) -> Tensor: ... -def mkldnn_reorder_conv3d_weight(self: Tensor, padding: List, stride: List, dilatation: List, groups: int) -> Tensor: ... +def mkldnn_reorder_conv2d_weight( + self: Tensor, + padding: List, + stride: List, + dilatation: List, + groups: int, +) -> Tensor: ... +def mkldnn_reorder_conv3d_weight( + self: Tensor, + padding: List, + stride: List, + dilatation: List, + groups: int, +) -> Tensor: ... # Defined in aten/src/ATen/native/mkldnn/Prelu.cpp def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ... # Defined at tools/autograd/templates/python_nn_functions.cpp @overload -def _parse_to(device: _device, dtype: _dtype, non_blocking: _bool, copy: _bool, *, - memory_format: memory_format) -> Tuple[_device, _dtype, _bool, memory_format]: ... +def _parse_to( + device: _device, + dtype: _dtype, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... @overload -def _parse_to(dtype: _dtype, non_blocking: _bool, copy: _bool, *, - memory_format: memory_format) -> Tuple[_device, _dtype, _bool, memory_format]: ... +def _parse_to( + dtype: _dtype, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... @overload -def _parse_to(tensor: Tensor, non_blocking: _bool, copy: _bool, *, - memory_format: memory_format) -> Tuple[_device, _dtype, _bool, memory_format]: ... +def _parse_to( + tensor: Tensor, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... # Defined in aten/src/ATen/naitve/PadSequence.cpp -def pad_sequence(sequences: List[Tensor], batch_first: bool = False, - padding_value: float = ...) -> Tensor: ... - +def pad_sequence( + sequences: List[Tensor], + batch_first: bool = False, + padding_value: float = ..., +) -> Tensor: ... 
def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ... - def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ... diff --git a/torch/_C/_profiler.pyi b/torch/_C/_profiler.pyi index 83adf8bc4e51..906b304e3083 100644 --- a/torch/_C/_profiler.pyi +++ b/torch/_C/_profiler.pyi @@ -53,7 +53,6 @@ class _ExperimentalConfig: profiler_measure_per_kernel: bool = ..., verbose: bool = ..., ) -> None: ... - ... class ProfilerConfig: def __init__( @@ -66,7 +65,6 @@ class ProfilerConfig: with_modules: bool, experimental_config: _ExperimentalConfig, ) -> None: ... - ... class _ProfilerEvent: start_tid: int diff --git a/torch/_C/return_types.pyi.in b/torch/_C/return_types.pyi.in index ca5e3f85f89e..00c5e6f918dc 100644 --- a/torch/_C/return_types.pyi.in +++ b/torch/_C/return_types.pyi.in @@ -1,8 +1,32 @@ # ${generated_comment} -from torch import Tensor, Generator, strided, memory_format, contiguous_format, strided, inf -from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, Literal, TypeVar +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) -from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout +from torch import contiguous_format, Generator, inf, memory_format, strided, Tensor +from torch.types import ( + _bool, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Number, +) ${namedtuple_defs} diff --git a/torch/fx/__init__.pyi b/torch/fx/__init__.pyi index ef6dc06066ef..8d063c1219b6 100644 --- a/torch/fx/__init__.pyi +++ b/torch/fx/__init__.pyi @@ -1,7 +1,11 @@ +from ._symbolic_trace import ( + symbolic_trace as symbolic_trace, + Tracer as Tracer, + wrap as wrap, +) from .graph import Graph as Graph from .graph_module import GraphModule as GraphModule -from .node import Node as Node, map_arg as map_arg -from .proxy import Proxy as Proxy -from ._symbolic_trace import Tracer as Tracer, symbolic_trace as symbolic_trace, wrap as wrap from .interpreter import Interpreter as Interpreter, Transformer as Transformer +from .node import map_arg as map_arg, Node as Node +from .proxy import Proxy as Proxy from .subgraph_rewriter import replace_pattern as replace_pattern diff --git a/torch/nn/functional.pyi.in b/torch/nn/functional.pyi.in index f3be7d4a989e..4b8f627396ef 100644 --- a/torch/nn/functional.pyi.in +++ b/torch/nn/functional.pyi.in @@ -1,7 +1,17 @@ +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + from torch import Tensor -from torch.types import _size, _dtype -from typing import Any, Optional, Tuple, Dict, List, Callable, Sequence, Union -from .common_types import _ratio_any_t, _size_any_t, _size_1_t, _size_2_t, _size_3_t, _size_2_opt_t, _size_3_opt_t +from torch.types import _dtype, _size + +from .common_types import ( + _ratio_any_t, + _size_1_t, + _size_2_opt_t, + _size_2_t, + _size_3_opt_t, + _size_3_t, + _size_any_t, +) # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys. # It is standards-track but not in `typing` yet. 
We leave this hear to be uncommented once the feature @@ -15,7 +25,6 @@ from .common_types import _ratio_any_t, _size_any_t, _size_1_t, _size_2_t, _size GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int] GRID_SAMPLE_PADDING_MODES = Dict[str, int] - # These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning. # # The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate. @@ -26,374 +35,570 @@ GRID_SAMPLE_PADDING_MODES = Dict[str, int] # deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code # generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system # to encode the type semantics of `_add_docstr`, should that system ever become widespread. -def fractional_max_pool2d_with_indices(input: Tensor, kernel_size: _size, output_size: Optional[_size] = ..., - output_ratio: Optional[_ratio_any_t] = ..., return_indices: bool = ..., - _random_samples: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ... - - -def fractional_max_pool3d_with_indices(input: Tensor, kernel_size: _size, output_size: Optional[_size] = ..., - output_ratio: Optional[_ratio_any_t] = ..., return_indices: bool = ..., - _random_samples: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ... - - -def max_pool1d_with_indices(input: Tensor, kernel_size: _size, stride: Optional[_size] = ..., padding: _size = ..., - dilation: _size = ..., ceil_mode: bool = ..., return_indices: bool = ...) -> Tuple[ - Tensor, Tensor]: ... - - -def max_pool2d_with_indices(input: Tensor, kernel_size: _size, stride: Optional[_size] = ..., padding: _size = ..., - dilation: _size = ..., ceil_mode: bool = ..., return_indices: bool = ...) -> Tuple[ - Tensor, Tensor]: ... - - -def max_pool3d_with_indices(input: Tensor, kernel_size: _size, stride: Optional[_size] = ..., padding: _size = ..., - dilation: _size = ..., ceil_mode: bool = ..., return_indices: bool = ...) -> Tuple[ - Tensor, Tensor]: ... - - -def max_unpool1d(input: Tensor, indices: Tensor, kernel_size: _size, stride: Optional[_size] = ..., - padding: _size = ..., output_size: Optional[_size] = ...) -> Tensor: ... - - -def max_unpool2d(input: Tensor, indices: Tensor, kernel_size: _size, stride: Optional[_size] = ..., - padding: _size = ..., output_size: Optional[_size] = ...) -> Tensor: ... - - -def max_unpool3d(input: Tensor, indices: Tensor, kernel_size: _size, stride: Optional[_size] = ..., - padding: _size = ..., output_size: Optional[_size] = ...) -> Tensor: ... - - -def lp_pool1d(input: Tensor, norm_type: float, kernel_size: _size_1_t, stride: Union[Optional[_size], Optional[int]] = ..., - ceil_mode: bool = ...) -> Tensor: ... - - -def lp_pool2d(input: Tensor, norm_type: float, kernel_size: _size_2_t, stride: Union[Optional[_size], Optional[int]] = ..., - ceil_mode: bool = ...) -> Tensor: ... - - -def adaptive_max_pool1d_with_indices(input: Tensor, output_size: _size, return_indices: bool = ...) -> Tuple[ - Tensor, Tensor]: ... - - -def adaptive_max_pool2d_with_indices(input: Tensor, output_size: _size_2_opt_t, return_indices: bool = ...) -> Tuple[ - Tensor, Tensor]: ... - - -def adaptive_max_pool3d_with_indices(input: Tensor, output_size: _size_3_opt_t, return_indices: bool = ...) -> Tuple[ - Tensor, Tensor]: ... 
- - +def fractional_max_pool2d_with_indices( + input: Tensor, + kernel_size: _size, + output_size: Optional[_size] = ..., + output_ratio: Optional[_ratio_any_t] = ..., + return_indices: bool = ..., + _random_samples: Optional[Tensor] = ..., +) -> Tuple[Tensor, Tensor]: ... +def fractional_max_pool3d_with_indices( + input: Tensor, + kernel_size: _size, + output_size: Optional[_size] = ..., + output_ratio: Optional[_ratio_any_t] = ..., + return_indices: bool = ..., + _random_samples: Optional[Tensor] = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_pool1d_with_indices( + input: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + dilation: _size = ..., + ceil_mode: bool = ..., + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_pool2d_with_indices( + input: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + dilation: _size = ..., + ceil_mode: bool = ..., + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_pool3d_with_indices( + input: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + dilation: _size = ..., + ceil_mode: bool = ..., + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_unpool1d( + input: Tensor, + indices: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + output_size: Optional[_size] = ..., +) -> Tensor: ... +def max_unpool2d( + input: Tensor, + indices: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + output_size: Optional[_size] = ..., +) -> Tensor: ... +def max_unpool3d( + input: Tensor, + indices: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + output_size: Optional[_size] = ..., +) -> Tensor: ... +def lp_pool1d( + input: Tensor, + norm_type: float, + kernel_size: _size_1_t, + stride: Union[Optional[_size], Optional[int]] = ..., + ceil_mode: bool = ..., +) -> Tensor: ... +def lp_pool2d( + input: Tensor, + norm_type: float, + kernel_size: _size_2_t, + stride: Union[Optional[_size], Optional[int]] = ..., + ceil_mode: bool = ..., +) -> Tensor: ... +def adaptive_max_pool1d_with_indices( + input: Tensor, + output_size: _size, + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def adaptive_max_pool2d_with_indices( + input: Tensor, + output_size: _size_2_opt_t, + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def adaptive_max_pool3d_with_indices( + input: Tensor, + output_size: _size_3_opt_t, + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... def adaptive_avg_pool1d(input: Tensor, output_size: _size_1_t) -> Tensor: ... - - def adaptive_avg_pool2d(input: Tensor, output_size: _size_2_opt_t) -> Tensor: ... - - def adaptive_avg_pool3d(input: Tensor, output_size: _size_3_opt_t) -> Tensor: ... - - -def dropout(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ... - - -def alpha_dropout(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ... - - -def dropout1d(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ... - - -def dropout2d(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ... - - -def dropout3d(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ... - - -def feature_alpha_dropout(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ... 
- - -def threshold(input: Tensor, threshold: float, value: float, inplace: bool = ...) -> Tensor: ... - - +def dropout( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def alpha_dropout( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def dropout1d( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def dropout2d( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def dropout3d( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def feature_alpha_dropout( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def threshold( + input: Tensor, + threshold: float, + value: float, + inplace: bool = ..., +) -> Tensor: ... def relu(input: Tensor, inplace: bool = ...) -> Tensor: ... - - def glu(input: Tensor, dim: int = ...) -> Tensor: ... - - -def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., inplace: bool = ...) -> Tensor: ... - - +def hardtanh( + input: Tensor, + min_val: float = ..., + max_val: float = ..., + inplace: bool = ..., +) -> Tensor: ... def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ... - - def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ... - - def selu(input: Tensor, inplace: bool = ...) -> Tensor: ... - - def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ... - - -def leaky_relu(input: Tensor, negative_slope: float = ..., inplace: bool = ...) -> Tensor: ... - - +def leaky_relu( + input: Tensor, + negative_slope: float = ..., + inplace: bool = ..., +) -> Tensor: ... def prelu(input: Tensor, weight: Tensor) -> Tensor: ... - - -def rrelu(input: Tensor, lower: float = ..., upper: float = ..., training: bool = ..., - inplace: bool = ...) -> Tensor: ... - - +def rrelu( + input: Tensor, + lower: float = ..., + upper: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... def gelu(input: Any, approximate: str = ...): ... - - def hardshrink(input: Tensor, lambd: float = ...) -> Tensor: ... - - def tanhshrink(input: Any): ... - - def softsign(input: Any): ... - - -def softmin(input: Tensor, dim: Optional[int] = ..., _stacklevel: int = ..., dtype: Optional[_dtype] = ...) -> Tensor: ... - - -def softmax(input: Tensor, dim: Optional[int] = ..., _stacklevel: int = ..., dtype: Optional[_dtype] = ...) -> Tensor: ... - - -def gumbel_softmax(logits: Tensor, tau: float = ..., hard: bool = ..., eps: float = ..., dim: int = ...) -> Tensor: ... - - -def log_softmax(input: Tensor, dim: Optional[int] = ..., _stacklevel: int = ..., - dtype: Optional[_dtype] = ...) -> Tensor: ... - - +def softmin( + input: Tensor, + dim: Optional[int] = ..., + _stacklevel: int = ..., + dtype: Optional[_dtype] = ..., +) -> Tensor: ... +def softmax( + input: Tensor, + dim: Optional[int] = ..., + _stacklevel: int = ..., + dtype: Optional[_dtype] = ..., +) -> Tensor: ... +def gumbel_softmax( + logits: Tensor, + tau: float = ..., + hard: bool = ..., + eps: float = ..., + dim: int = ..., +) -> Tensor: ... +def log_softmax( + input: Tensor, + dim: Optional[int] = ..., + _stacklevel: int = ..., + dtype: Optional[_dtype] = ..., +) -> Tensor: ... def tanh(input: Any): ... - - def sigmoid(input: Any) -> Tensor: ... - def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: ... 
- - def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = ...) -> Tensor: ... - - -def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = ...) -> Tensor: ... - - +def bilinear( + input1: Tensor, + input2: Tensor, + weight: Tensor, + bias: Optional[Tensor] = ..., +) -> Tensor: ... def silu(input: Tensor, inplace: bool = False) -> Tensor: ... - def mish(input: Tensor, inplace: bool = False) -> Tensor: ... - def hardswish(input: Tensor, inplace: bool = False) -> Tensor: ... - - -def embedding(input: Tensor, weight: Tensor, padding_idx: Optional[int] = ..., max_norm: Optional[float] = ..., - norm_type: float = ..., scale_grad_by_freq: bool = ..., sparse: bool = ...) -> Tensor: ... - - -def embedding_bag(input: Tensor, weight: Tensor, offsets: Optional[Tensor] = ..., max_norm: Optional[float] = ..., - norm_type: float = ..., scale_grad_by_freq: bool = ..., mode: str = ..., - sparse: bool = ..., per_sample_weights: Optional[Tensor] = ..., - include_last_offset: bool = ..., padding_idx: Optional[int] = ...) -> Tensor: ... - -def batch_norm(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], - weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ..., training: bool = ..., - momentum: float = ..., eps: float = ...) -> Tensor: ... - - -def instance_norm(input: Tensor, running_mean: Optional[Tensor] = ..., running_var: Optional[Tensor] = ..., - weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ..., use_input_stats: bool = ..., - momentum: float = ..., eps: float = ...) -> Tensor: ... - - -def layer_norm(input: Tensor, normalized_shape: Sequence[int], weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ..., - eps: float = ...) -> Tensor: ... - - -def group_norm(input: Tensor, num_groups: int, weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ..., - eps: float = ...) -> Tensor: ... - - -def local_response_norm(input: Tensor, size: int, alpha: float = ..., beta: float = ..., k: float = ...) -> Tensor: ... - - -def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: int = ..., - reduction: str = ..., zero_infinity: bool = ...) -> Tensor: ... - - -def nll_loss(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., size_average: Optional[bool] = ..., - ignore_index: int = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ... - - -def poisson_nll_loss(input: Tensor, target: Tensor, log_input: bool = ..., full: bool = ..., - size_average: Optional[bool] = ..., eps: float = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def gaussian_nll_loss(input: Tensor, target: Tensor, var: Tensor, full: Optional[bool] = ..., - eps: Optional[float] = ..., reduction: Optional[str] = ...) -> Tensor: ... - - -def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ..., log_target: bool = ...) -> Tensor: ... - - -def cross_entropy(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., size_average: Optional[bool] = ..., - ignore_index: int = ..., reduce: Optional[bool] = ..., reduction: str = ..., - label_smoothing: float = ...) -> Tensor: ... - - -def binary_cross_entropy(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., - size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... 
- - -def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., - size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ..., pos_weight: Optional[Tensor] = ...) -> Tensor: ... - - -def smooth_l1_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ..., beta: float = ...) -> Tensor: ... - - -def huber_loss(input: Tensor, target: Tensor, reduction: str = ..., delta: float = ...) -> Tensor: ... - - -def l1_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def mse_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ..., - size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def hinge_embedding_loss(input: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., - reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ... - - -def multilabel_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., - reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ... - - -def soft_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def multilabel_soft_margin_loss(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., - size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ..., - size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def multi_margin_loss(input: Tensor, target: Tensor, p: int = ..., margin: float = ..., weight: Optional[Tensor] = ..., - size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., - reduction: str = ...) -> Tensor: ... - - -def upsample(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ..., mode: str = ..., - align_corners: Optional[Any] = ...): ... - - -def interpolate(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ..., mode: str = ..., - align_corners: Optional[Any] = ..., recompute_scale_factor: Optional[Any] = ..., - antialias: bool = ...): ... - - -def upsample_nearest(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ...): ... - - -def upsample_bilinear(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ...): ... - - -def grid_sample(input: Tensor, grid: Tensor, mode: str = ..., padding_mode: str = ..., - align_corners: Optional[Any] = ...) -> Tensor: ... - - -def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[Any] = ...) -> Tensor: ... - - -def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: float = ...) -> Tensor: ... - - -def pairwise_distance(x1: Tensor, x2: Tensor, p: float = ..., eps: float = ..., keepdim: bool = ...) -> Tensor: ... - - -def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: float = ..., p: float = ..., - eps: float = ..., swap: bool = ..., size_average: Optional[bool] = ..., - reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ... 
- - -def triplet_margin_with_distance_loss(anchor: Tensor, positive: Tensor, negative: Tensor, *, - distance_function: Optional[Callable[[Tensor, Tensor], Tensor]]=..., - margin: float=..., swap: bool=..., reduction: str=...) -> Tensor: ... - - -def normalize(input: Tensor, p: float = ..., dim: int = ..., eps: float = ..., - out: Optional[Tensor] = ...) -> Tensor: ... - - -def assert_int_or_pair(arg: Any, arg_name: Any, message: Any) -> None: ... - - -def unfold(input: Tensor, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ..., - stride: _size_any_t = ...) -> Tensor: ... - - -def fold(input: Tensor, output_size: _size_any_t, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ..., - stride: _size_any_t = ...) -> Tensor: ... - - +def embedding( + input: Tensor, + weight: Tensor, + padding_idx: Optional[int] = ..., + max_norm: Optional[float] = ..., + norm_type: float = ..., + scale_grad_by_freq: bool = ..., + sparse: bool = ..., +) -> Tensor: ... +def embedding_bag( + input: Tensor, + weight: Tensor, + offsets: Optional[Tensor] = ..., + max_norm: Optional[float] = ..., + norm_type: float = ..., + scale_grad_by_freq: bool = ..., + mode: str = ..., + sparse: bool = ..., + per_sample_weights: Optional[Tensor] = ..., + include_last_offset: bool = ..., + padding_idx: Optional[int] = ..., +) -> Tensor: ... +def batch_norm( + input: Tensor, + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + training: bool = ..., + momentum: float = ..., + eps: float = ..., +) -> Tensor: ... +def instance_norm( + input: Tensor, + running_mean: Optional[Tensor] = ..., + running_var: Optional[Tensor] = ..., + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + use_input_stats: bool = ..., + momentum: float = ..., + eps: float = ..., +) -> Tensor: ... +def layer_norm( + input: Tensor, + normalized_shape: Sequence[int], + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + eps: float = ..., +) -> Tensor: ... +def group_norm( + input: Tensor, + num_groups: int, + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + eps: float = ..., +) -> Tensor: ... +def local_response_norm( + input: Tensor, + size: int, + alpha: float = ..., + beta: float = ..., + k: float = ..., +) -> Tensor: ... +def ctc_loss( + log_probs: Tensor, + targets: Tensor, + input_lengths: Tensor, + target_lengths: Tensor, + blank: int = ..., + reduction: str = ..., + zero_infinity: bool = ..., +) -> Tensor: ... +def nll_loss( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + ignore_index: int = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def poisson_nll_loss( + input: Tensor, + target: Tensor, + log_input: bool = ..., + full: bool = ..., + size_average: Optional[bool] = ..., + eps: float = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def gaussian_nll_loss( + input: Tensor, + target: Tensor, + var: Tensor, + full: Optional[bool] = ..., + eps: Optional[float] = ..., + reduction: Optional[str] = ..., +) -> Tensor: ... +def kl_div( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + log_target: bool = ..., +) -> Tensor: ... 
+def cross_entropy( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + ignore_index: int = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + label_smoothing: float = ..., +) -> Tensor: ... +def binary_cross_entropy( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def binary_cross_entropy_with_logits( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + pos_weight: Optional[Tensor] = ..., +) -> Tensor: ... +def smooth_l1_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + beta: float = ..., +) -> Tensor: ... +def huber_loss( + input: Tensor, + target: Tensor, + reduction: str = ..., + delta: float = ..., +) -> Tensor: ... +def l1_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def mse_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def margin_ranking_loss( + input1: Tensor, + input2: Tensor, + target: Tensor, + margin: float = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def hinge_embedding_loss( + input: Tensor, + target: Tensor, + margin: float = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def multilabel_margin_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def soft_margin_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def multilabel_soft_margin_loss( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def cosine_embedding_loss( + input1: Tensor, + input2: Tensor, + target: Tensor, + margin: float = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def multi_margin_loss( + input: Tensor, + target: Tensor, + p: int = ..., + margin: float = ..., + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def upsample( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., + mode: str = ..., + align_corners: Optional[Any] = ..., +): ... +def interpolate( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., + mode: str = ..., + align_corners: Optional[Any] = ..., + recompute_scale_factor: Optional[Any] = ..., + antialias: bool = ..., +): ... +def upsample_nearest( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., +): ... +def upsample_bilinear( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., +): ... 
+def grid_sample( + input: Tensor, + grid: Tensor, + mode: str = ..., + padding_mode: str = ..., + align_corners: Optional[Any] = ..., +) -> Tensor: ... +def affine_grid( + theta: Tensor, + size: List[int], + align_corners: Optional[Any] = ..., +) -> Tensor: ... +def pad( + input: Tensor, + pad: Sequence[int], + mode: str = ..., + value: float = ..., +) -> Tensor: ... +def pairwise_distance( + x1: Tensor, + x2: Tensor, + p: float = ..., + eps: float = ..., + keepdim: bool = ..., +) -> Tensor: ... +def triplet_margin_loss( + anchor: Tensor, + positive: Tensor, + negative: Tensor, + margin: float = ..., + p: float = ..., + eps: float = ..., + swap: bool = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def triplet_margin_with_distance_loss( + anchor: Tensor, + positive: Tensor, + negative: Tensor, + *, + distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ..., + margin: float = ..., + swap: bool = ..., + reduction: str = ..., +) -> Tensor: ... +def normalize( + input: Tensor, + p: float = ..., + dim: int = ..., + eps: float = ..., + out: Optional[Tensor] = ..., +) -> Tensor: ... +def assert_int_or_pair( + arg: Any, + arg_name: Any, + message: Any, +) -> None: ... +def unfold( + input: Tensor, + kernel_size: _size_any_t, + dilation: _size_any_t = ..., + padding: _size_any_t = ..., + stride: _size_any_t = ..., +) -> Tensor: ... +def fold( + input: Tensor, + output_size: _size_any_t, + kernel_size: _size_any_t, + dilation: _size_any_t = ..., + padding: _size_any_t = ..., + stride: _size_any_t = ..., +) -> Tensor: ... def _canonical_mask( - mask: Optional[Tensor], - mask_name: str, - other_type: Optional[_dtype], - other_name: str, - target_type: _dtype, - check_other: bool = True, + mask: Optional[Tensor], + mask_name: str, + other_type: Optional[_dtype], + other_name: str, + target_type: _dtype, + check_other: bool = True, ) -> Optional[Tensor]: ... - def _none_or_dtype(input: Optional[Tensor]) -> Optional[_dtype]: ... - -def multi_head_attention_forward(query: Tensor, - key: Tensor, - value: Tensor, - embed_dim_to_check: int, - num_heads: int, - in_proj_weight: Optional[Tensor], - in_proj_bias: Optional[Tensor], - bias_k: Optional[Tensor], - bias_v: Optional[Tensor], - add_zero_attn: bool, - dropout_p: float, - out_proj_weight: Tensor, - out_proj_bias: Optional[Tensor], - training: bool = True, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - use_separate_proj_weight: bool = False, - q_proj_weight: Optional[Tensor] = None, - k_proj_weight: Optional[Tensor] = None, - v_proj_weight: Optional[Tensor] = None, - static_k: Optional[Tensor] = None, - static_v: Optional[Tensor] = None, - average_attn_weights: bool = True, - is_causal: bool = False - ) -> Tuple[Tensor, Optional[Tensor]]: ... 
- +def multi_head_attention_forward( + query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Optional[Tensor], + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Optional[Tensor], + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False, +) -> Tuple[Tensor, Optional[Tensor]]: ... ${imported_hints} diff --git a/torch/nn/parallel/__init__.pyi b/torch/nn/parallel/__init__.pyi index c06985d3baf7..1a007d339c39 100644 --- a/torch/nn/parallel/__init__.pyi +++ b/torch/nn/parallel/__init__.pyi @@ -1,4 +1,4 @@ -from .data_parallel import DataParallel as DataParallel, data_parallel as data_parallel +from .data_parallel import data_parallel as data_parallel, DataParallel as DataParallel from .distributed import DistributedDataParallel as DistributedDataParallel from .parallel_apply import parallel_apply as parallel_apply from .replicate import replicate as replicate diff --git a/torch/nn/parallel/common_types.pyi b/torch/nn/parallel/common_types.pyi index c74181d8ea0b..99a7ef6bf0e7 100644 --- a/torch/nn/parallel/common_types.pyi +++ b/torch/nn/parallel/common_types.pyi @@ -1,5 +1,6 @@ -from typing import Union, Sequence -from ... import device +from typing import Sequence, Union + +from torch import device _device_t = Union[int, device] _devices_t = Sequence[_device_t] diff --git a/torch/nn/parallel/data_parallel.pyi b/torch/nn/parallel/data_parallel.pyi index 2beb6f17a6ad..60eda6833bb6 100644 --- a/torch/nn/parallel/data_parallel.pyi +++ b/torch/nn/parallel/data_parallel.pyi @@ -1,7 +1,8 @@ from typing import Any, Optional -from .common_types import _devices_t, _device_t + +from torch import device, Tensor from ..modules import Module -from ... import device, Tensor +from .common_types import _device_t, _devices_t class DataParallel(Module): module: Module = ... @@ -10,10 +11,19 @@ class DataParallel(Module): output_device: _device_t = ... src_device_obj: device = ... - def __init__(self, module: Module, device_ids: Optional[_devices_t] = ..., output_device: Optional[_device_t] = ..., - dim: int = ...) -> None: ... + def __init__( + self, + module: Module, + device_ids: Optional[_devices_t] = ..., + output_device: Optional[_device_t] = ..., + dim: int = ..., + ) -> None: ... - -def data_parallel(module: Module, inputs: Any, device_ids: Optional[_devices_t] = ..., - output_device: Optional[_device_t] = ..., dim: int = ..., - module_kwargs: Optional[Any] = ...) -> Tensor: ... +def data_parallel( + module: Module, + inputs: Any, + device_ids: Optional[_devices_t] = ..., + output_device: Optional[_device_t] = ..., + dim: int = ..., + module_kwargs: Optional[Any] = ..., +) -> Tensor: ... 
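The `torch/nn/parallel` stubs above are mostly import churn, and the new ordering is what usort produces: groups separated by blank lines (standard library, then `torch`, then relative imports), `import X` before `from X import ...`, and names within a `from` list compared case-insensitively, which is why `data_parallel` now sorts ahead of `DataParallel`. A small runnable illustration, assuming `torch` is installed (the specific names are only examples):

```python
# Standard-library group, names sorted case-insensitively
# ("overload" lands between "Optional" and "Sequence").
from typing import Any, Optional, overload, Sequence, Union

# torch group: the plain import first, then "from" imports, with "device"
# sorting before "Tensor" under case-insensitive comparison.
import torch
from torch import device, Tensor
```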
diff --git a/torch/nn/parallel/parallel_apply.pyi b/torch/nn/parallel/parallel_apply.pyi index 75db45e85281..f2f0410fc525 100644 --- a/torch/nn/parallel/parallel_apply.pyi +++ b/torch/nn/parallel/parallel_apply.pyi @@ -1,7 +1,11 @@ -from typing import Any, Optional, Sequence, List -from .common_types import _devices_t +from typing import Any, List, Optional, Sequence + from ..modules import Module +from .common_types import _devices_t - -def parallel_apply(modules: Sequence[Module], inputs: Sequence[Any], kwargs_tup: Optional[Any] = ..., - devices: Optional[_devices_t] = ...) -> List[Any]: ... +def parallel_apply( + modules: Sequence[Module], + inputs: Sequence[Any], + kwargs_tup: Optional[Any] = ..., + devices: Optional[_devices_t] = ..., +) -> List[Any]: ... diff --git a/torch/nn/parallel/replicate.pyi b/torch/nn/parallel/replicate.pyi index 52962e47de6c..a32c9f16ecbc 100644 --- a/torch/nn/parallel/replicate.pyi +++ b/torch/nn/parallel/replicate.pyi @@ -1,7 +1,10 @@ -from typing import List, Union, Sequence, TypeVar +from typing import List, Sequence, Union + from ..modules import Module from .common_types import _devices_t - -def replicate(network: Module, devices: Union[_devices_t, Sequence[_devices_t]], detach: bool = ...) -> List[ - Module]: ... +def replicate( + network: Module, + devices: Union[_devices_t, Sequence[_devices_t]], + detach: bool = ..., +) -> List[Module]: ... diff --git a/torch/nn/parallel/scatter_gather.pyi b/torch/nn/parallel/scatter_gather.pyi index c49011a8d46a..c46495bff060 100644 --- a/torch/nn/parallel/scatter_gather.pyi +++ b/torch/nn/parallel/scatter_gather.pyi @@ -1,13 +1,17 @@ -from typing import Any, Dict, List, Tuple, overload, TypeVar -from ... import Tensor +from typing import Any, Dict, List, overload, Tuple, TypeVar + +from torch import Tensor from .common_types import _device_t, _devices_t - -T = TypeVar('T', Dict, List, Tuple) +T = TypeVar("T", Dict, List, Tuple) # For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise. @overload -def scatter(inputs: Tensor, target_gpus: _devices_t, dim: int = ...) -> Tuple[Tensor, ...]: ... +def scatter( + inputs: Tensor, + target_gpus: _devices_t, + dim: int = ..., +) -> Tuple[Tensor, ...]: ... # flake8 will raise a spurious error here since `torch/__init__.pyi` has not been generated yet # so mypy will interpret `Tensor` as `Any` since it is an import from what it believes to be an @@ -16,9 +20,11 @@ def scatter(inputs: Tensor, target_gpus: _devices_t, dim: int = ...) -> Tuple[Te @overload def scatter(inputs: T, target_gpus: _devices_t, dim: int = ...) -> List[T]: ... - # TODO More precise types here. -def scatter_kwargs(inputs: Any, kwargs: Any, target_gpus: _devices_t, dim: int = ...) -> Any: ... - - +def scatter_kwargs( + inputs: Any, + kwargs: Any, + target_gpus: _devices_t, + dim: int = ..., +) -> Any: ... def gather(outputs: Any, target_device: _device_t, dim: int = ...) -> Any: ... diff --git a/torch/nn/parameter.pyi b/torch/nn/parameter.pyi index 747a4a466293..219bb6d4efa2 100644 --- a/torch/nn/parameter.pyi +++ b/torch/nn/parameter.pyi @@ -1,23 +1,40 @@ -import torch -from .. import Tensor -from typing import Tuple, Optional import builtins +from typing import Optional, Tuple + +import torch +from torch import Tensor class Parameter(Tensor): - def __init__(self, data: Tensor=..., requires_grad: builtins.bool=...): ... - - ... + def __init__( + self, + data: Tensor = ..., + requires_grad: builtins.bool = ..., + ): ... def is_lazy(param: Tensor): ... 
class UninitializedParameter(Tensor): - def __init__(self, data: Tensor=..., requires_grad: builtins.bool=...): ... - - def materialize(self, shape: Tuple[int, ...], device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None): ... - ... + def __init__( + self, + data: Tensor = ..., + requires_grad: builtins.bool = ..., + ): ... + def materialize( + self, + shape: Tuple[int, ...], + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): ... class UninitializedBuffer(Tensor): - def __init__(self, data: Tensor=..., requires_grad: builtins.bool=...): ... - - def materialize(self, shape: Tuple[int, ...], device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None): ... - ... + def __init__( + self, + data: Tensor = ..., + requires_grad: builtins.bool = ..., + ): ... + def materialize( + self, + shape: Tuple[int, ...], + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): ... diff --git a/torch/nn/utils/rnn.pyi b/torch/nn/utils/rnn.pyi index d337caa7af36..b161be6ea950 100644 --- a/torch/nn/utils/rnn.pyi +++ b/torch/nn/utils/rnn.pyi @@ -1,13 +1,13 @@ from typing import ( Any, List, + NamedTuple, Optional, + overload, Sequence, Tuple, TypeVar, Union, - NamedTuple, - overload, ) from torch import Tensor @@ -21,68 +21,78 @@ class PackedSequence_(NamedTuple): def bind(optional: Any, fn: Any): ... - -T = TypeVar('T') - +T = TypeVar("T") class PackedSequence(PackedSequence_): - def __new__(cls, data: Tensor, batch_sizes: Optional[Tensor] = ..., sorted_indices: Optional[Tensor] = ..., - unsorted_indices: Optional[Tensor] = ...) -> PackedSequence: ... - + def __new__( + cls, + data: Tensor, + batch_sizes: Optional[Tensor] = ..., + sorted_indices: Optional[Tensor] = ..., + unsorted_indices: Optional[Tensor] = ..., + ) -> PackedSequence: ... def pin_memory(self: T) -> T: ... - def cuda(self: T, *args: Any, **kwargs: Any) -> T: ... - def cpu(self: T) -> T: ... - def double(self: T) -> T: ... - def float(self: T) -> T: ... - def half(self: T) -> T: ... - def long(self: T) -> T: ... - def int(self: T) -> T: ... - def short(self: T) -> T: ... - def char(self: T) -> T: ... - def byte(self: T) -> T: ... - @overload - def to(self: T, dtype: _dtype, non_blocking: bool = False, copy: bool = False) -> T: ... - + def to( + self: T, + dtype: _dtype, + non_blocking: bool = False, + copy: bool = False, + ) -> T: ... @overload - def to(self: T, device: Optional[Union[_device, str]] = None, dtype: Optional[_dtype] = None, - non_blocking: bool = False, copy: bool = False) -> T: ... - + def to( + self: T, + device: Optional[Union[_device, str]] = None, + dtype: Optional[_dtype] = None, + non_blocking: bool = False, + copy: bool = False, + ) -> T: ... @overload - def to(self, other: Tensor, non_blocking: bool = False, copy: bool = False) -> T: ... - + def to( + self, + other: Tensor, + non_blocking: bool = False, + copy: bool = False, + ) -> T: ... @property def is_cuda(self) -> bool: ... - def is_pinned(self) -> bool: ... - def invert_permutation(permutation: Optional[Tensor]): ... - - -def pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: bool = ..., - enforce_sorted: bool = ...) -> PackedSequence: ... - - -def pad_packed_sequence(sequence: PackedSequence, batch_first: bool = ..., padding_value: float = ..., - total_length: Optional[int] = ...) -> Tuple[Tensor, ...]: ... - - -def pad_sequence(sequences: List[Tensor], batch_first: bool = False, padding_value: float = ...) -> Tensor: ... 
- - -def pack_sequence(sequences: Sequence[Tensor], enforce_sorted: bool = ...) -> PackedSequence: ... - - -def get_packed_sequence(data: Tensor, batch_sizes: Optional[Tensor], sorted_indices: Optional[Tensor], - unsorted_indices: Optional[Tensor]) -> PackedSequence: ... +def pack_padded_sequence( + input: Tensor, + lengths: Tensor, + batch_first: bool = ..., + enforce_sorted: bool = ..., +) -> PackedSequence: ... +def pad_packed_sequence( + sequence: PackedSequence, + batch_first: bool = ..., + padding_value: float = ..., + total_length: Optional[int] = ..., +) -> Tuple[Tensor, ...]: ... +def pad_sequence( + sequences: List[Tensor], + batch_first: bool = False, + padding_value: float = ..., +) -> Tensor: ... +def pack_sequence( + sequences: Sequence[Tensor], + enforce_sorted: bool = ..., +) -> PackedSequence: ... +def get_packed_sequence( + data: Tensor, + batch_sizes: Optional[Tensor], + sorted_indices: Optional[Tensor], + unsorted_indices: Optional[Tensor], +) -> PackedSequence: ... diff --git a/torch/optim/__init__.pyi b/torch/optim/__init__.pyi index 5924f3101931..8d35bab14c20 100644 --- a/torch/optim/__init__.pyi +++ b/torch/optim/__init__.pyi @@ -1,5 +1,4 @@ -from . import swa_utils as swa_utils -from . import lr_scheduler as lr_scheduler +from . import lr_scheduler as lr_scheduler, swa_utils as swa_utils from .adadelta import Adadelta as Adadelta from .adagrad import Adagrad as Adagrad from .adam import Adam as Adam diff --git a/torch/optim/_multi_tensor/__init__.pyi b/torch/optim/_multi_tensor/__init__.pyi index fec9f9ae782c..97c3e2df9893 100644 --- a/torch/optim/_multi_tensor/__init__.pyi +++ b/torch/optim/_multi_tensor/__init__.pyi @@ -1,4 +1,5 @@ from functools import partial + from torch import optim Adam = partial(optim.Adam, foreach=True) diff --git a/torch/optim/adadelta.pyi b/torch/optim/adadelta.pyi index 15fc6c759aff..8eff5b390a41 100644 --- a/torch/optim/adadelta.pyi +++ b/torch/optim/adadelta.pyi @@ -1,5 +1,11 @@ -from typing import Tuple from .optimizer import _params_t, Optimizer class Adadelta(Optimizer): - def __init__(self, params: _params_t, lr: float=..., rho: float=..., eps: float=..., weight_decay: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + rho: float = ..., + eps: float = ..., + weight_decay: float = ..., + ) -> None: ... diff --git a/torch/optim/adagrad.pyi b/torch/optim/adagrad.pyi index 4020a2f11a96..895f54b7e133 100644 --- a/torch/optim/adagrad.pyi +++ b/torch/optim/adagrad.pyi @@ -1,5 +1,12 @@ -from typing import Tuple from .optimizer import _params_t, Optimizer class Adagrad(Optimizer): - def __init__(self, params: _params_t, lr: float=..., lr_decay: float=..., weight_decay: float=..., initial_accumulator_value: float=..., eps: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + lr_decay: float = ..., + weight_decay: float = ..., + initial_accumulator_value: float = ..., + eps: float = ..., + ) -> None: ... 
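
Not part of the patch: a short round trip through the packing helpers typed in `torch/nn/utils/rnn.pyi` above, showing what the `T = TypeVar("T")` self-typing on `PackedSequence` methods expresses in practice. The tensor sizes and lengths are arbitrary assumptions.

```python
# Illustrative only -- the stubs above describe these call signatures and return types.
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch = torch.zeros(3, 4, 6)            # (batch, max_seq_len, features)
lengths = torch.tensor([4, 3, 1])       # must be descending when enforce_sorted=True

packed = pack_padded_sequence(batch, lengths, batch_first=True, enforce_sorted=True)
# Conversion methods return the same type, which is what the T-typed overloads capture.
packed = packed.float()

padded, out_lengths = pad_packed_sequence(packed, batch_first=True)
# padded: (3, 4, 6); out_lengths matches the original lengths
```
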
diff --git a/torch/optim/adam.pyi b/torch/optim/adam.pyi index 6fde30275a3a..b4efafaef2b2 100644 --- a/torch/optim/adam.pyi +++ b/torch/optim/adam.pyi @@ -1,5 +1,20 @@ -from typing import Tuple, Optional +from typing import Optional, Tuple + from .optimizer import _params_t, Optimizer class Adam(Optimizer): - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=..., weight_decay: float=..., amsgrad: bool = ..., *, foreach: Optional[bool] = ..., maximize: bool = ..., capturable: bool = ..., differentiable: bool = ..., fused: bool = ...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + amsgrad: bool = ..., + *, + foreach: Optional[bool] = ..., + maximize: bool = ..., + capturable: bool = ..., + differentiable: bool = ..., + fused: bool = ..., + ) -> None: ... diff --git a/torch/optim/adamax.pyi b/torch/optim/adamax.pyi index 209d3d3270fe..c74859f581bd 100644 --- a/torch/optim/adamax.pyi +++ b/torch/optim/adamax.pyi @@ -1,5 +1,13 @@ from typing import Tuple + from .optimizer import _params_t, Optimizer class Adamax(Optimizer): - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=..., weight_decay: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + ) -> None: ... diff --git a/torch/optim/adamw.pyi b/torch/optim/adamw.pyi index 5c8843568886..a9427f038619 100644 --- a/torch/optim/adamw.pyi +++ b/torch/optim/adamw.pyi @@ -1,5 +1,20 @@ -from typing import Tuple, Optional +from typing import Optional, Tuple + from .optimizer import _params_t, Optimizer class AdamW(Optimizer): - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=..., weight_decay: float=..., amsgrad: bool = ..., *, foreach: Optional[bool] = ..., maximize: bool = ..., capturable: bool = ..., differentiable: bool = ..., fused: bool = ...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + amsgrad: bool = ..., + *, + foreach: Optional[bool] = ..., + maximize: bool = ..., + capturable: bool = ..., + differentiable: bool = ..., + fused: bool = ..., + ) -> None: ... diff --git a/torch/optim/asgd.pyi b/torch/optim/asgd.pyi index f2376cca37cf..dd64e34057e4 100644 --- a/torch/optim/asgd.pyi +++ b/torch/optim/asgd.pyi @@ -1,5 +1,12 @@ -from typing import Tuple from .optimizer import _params_t, Optimizer class ASGD(Optimizer): - def __init__(self, params: _params_t, lr: float=..., lambd: float=..., alpha: float=..., t0: float=..., weight_decay: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + lambd: float = ..., + alpha: float = ..., + t0: float = ..., + weight_decay: float = ..., + ) -> None: ... diff --git a/torch/optim/lbfgs.pyi b/torch/optim/lbfgs.pyi index 791e6107510a..f55838e02be9 100644 --- a/torch/optim/lbfgs.pyi +++ b/torch/optim/lbfgs.pyi @@ -1,5 +1,16 @@ -from typing import Tuple, Optional +from typing import Optional + from .optimizer import _params_t, Optimizer class LBFGS(Optimizer): - def __init__(self, params: _params_t, lr: float=..., max_iter: int=..., max_eval: Optional[int]=..., tolerance_grad: float=..., tolerance_change: float=..., history_size: int=..., line_search_fn: Optional[str]=...) -> None: ... 
+ def __init__( + self, + params: _params_t, + lr: float = ..., + max_iter: int = ..., + max_eval: Optional[int] = ..., + tolerance_grad: float = ..., + tolerance_change: float = ..., + history_size: int = ..., + line_search_fn: Optional[str] = ..., + ) -> None: ... diff --git a/torch/optim/lr_scheduler.pyi b/torch/optim/lr_scheduler.pyi index 35299fbcaee3..8c81d6ecfb43 100644 --- a/torch/optim/lr_scheduler.pyi +++ b/torch/optim/lr_scheduler.pyi @@ -1,4 +1,5 @@ -from typing import Iterable, Any, Optional, Callable, Union, List, Dict +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + from .optimizer import Optimizer class LRScheduler: @@ -6,60 +7,131 @@ class LRScheduler: base_lrs: List[float] = ... last_epoch: int = ... verbose: bool = ... - def __init__(self, optimizer: Optimizer, last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... def state_dict(self) -> Dict[str, Any]: ... def load_state_dict(self, state_dict: Dict[str, Any]) -> None: ... def get_last_lr(self) -> List[float]: ... def get_lr(self) -> float: ... def step(self, epoch: Optional[int] = ...) -> None: ... - def print_lr(self, is_verbose: bool, group: Dict[str, Any], lr: float, epoch: Optional[int] = ...) -> None: ... + def print_lr( + self, + is_verbose: bool, + group: Dict[str, Any], + lr: float, + epoch: Optional[int] = ..., + ) -> None: ... -class _LRScheduler(LRScheduler): - ... +class _LRScheduler(LRScheduler): ... class LambdaLR(LRScheduler): lr_lambdas: List[Callable[[int], float]] = ... - def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class MultiplicativeLR(LRScheduler): lr_lambdas: List[Callable[[int], float]] = ... - def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class StepLR(LRScheduler): step_size: int = ... gamma: float = ... - def __init__(self, optimizer: Optimizer, step_size: int, gamma: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + step_size: int, + gamma: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class MultiStepLR(LRScheduler): milestones: Iterable[int] = ... gamma: float = ... - def __init__(self, optimizer: Optimizer, milestones: Iterable[int], gamma: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + milestones: Iterable[int], + gamma: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class ConstantLR(LRScheduler): factor: float = ... total_iters: int = ... - def __init__(self, optimizer: Optimizer, factor: float=..., total_iters: int=..., last_epoch: int=..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + factor: float = ..., + total_iters: int = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... 
class LinearLR(LRScheduler): start_factor: float = ... end_factor: float = ... total_iters: int = ... - def __init__(self, optimizer: Optimizer, start_factor: float=..., end_factor: float= ..., total_iters: int= ..., last_epoch: int= ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + start_factor: float = ..., + end_factor: float = ..., + total_iters: int = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class ExponentialLR(LRScheduler): gamma: float = ... - def __init__(self, optimizer: Optimizer, gamma: float, last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + gamma: float, + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class ChainedScheduler(LRScheduler): def __init__(self, schedulers: List[LRScheduler]) -> None: ... class SequentialLR(LRScheduler): - def __init__(self, optimizer: Optimizer, schedulers: List[LRScheduler], milestones: List[int], last_epoch: int=..., verbose: bool=...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + schedulers: List[LRScheduler], + milestones: List[int], + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class CosineAnnealingLR(LRScheduler): T_max: int = ... eta_min: float = ... - def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + T_max: int, + eta_min: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class ReduceLROnPlateau: factor: float = ... @@ -77,7 +149,19 @@ class ReduceLROnPlateau: mode_worse: Optional[float] = ... eps: float = ... last_epoch: int = ... - def __init__(self, optimizer: Optimizer, mode: str = ..., factor: float = ..., patience: int = ..., threshold: float = ..., threshold_mode: str = ..., cooldown: int = ..., min_lr: Union[List[float], float] = ..., eps: float = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + mode: str = ..., + factor: float = ..., + patience: int = ..., + threshold: float = ..., + threshold_mode: str = ..., + cooldown: int = ..., + min_lr: Union[List[float], float] = ..., + eps: float = ..., + verbose: bool = ..., + ) -> None: ... def step(self, metrics: Any, epoch: Optional[int] = ...) -> None: ... @property def in_cooldown(self) -> bool: ... @@ -95,7 +179,23 @@ class CyclicLR(LRScheduler): cycle_momentum: bool = ... base_momentums: List[float] = ... max_momentums: List[float] = ... - def __init__(self, optimizer: Optimizer, base_lr: Union[float, List[float]], max_lr: Union[float, List[float]], step_size_up: int = ..., step_size_down: Optional[int] = ..., mode: str = ..., gamma: float = ..., scale_fn: Optional[Callable[[float], float]] = ..., scale_mode: str = ..., cycle_momentum: bool = ..., base_momentum: float = ..., max_momentum: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + base_lr: Union[float, List[float]], + max_lr: Union[float, List[float]], + step_size_up: int = ..., + step_size_down: Optional[int] = ..., + mode: str = ..., + gamma: float = ..., + scale_fn: Optional[Callable[[float], float]] = ..., + scale_mode: str = ..., + cycle_momentum: bool = ..., + base_momentum: float = ..., + max_momentum: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... def scale_fn(self, x: Any) -> float: ... 
class CosineAnnealingWarmRestarts(LRScheduler): @@ -104,7 +204,15 @@ class CosineAnnealingWarmRestarts(LRScheduler): T_mult: Optional[int] = ... eta_min: Optional[float] = ... T_cur: Any = ... - def __init__(self, optimizer: Optimizer, T_0: int, T_mult: int = ..., eta_min: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + T_0: int, + T_mult: int = ..., + eta_min: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... def step(self, epoch: Optional[Any] = ...): ... class OneCycleLR(LRScheduler): @@ -112,9 +220,33 @@ class OneCycleLR(LRScheduler): anneal_func: Callable[[float, float, float], float] = ... cycle_momentum: bool = ... use_beta1: bool = ... - def __init__(self, optimizer: Optimizer, max_lr: Union[float, List[float]], total_steps: int = ..., epochs: int = ..., steps_per_epoch: int = ..., pct_start: float = ..., anneal_strategy: str = ..., cycle_momentum: bool = ..., base_momentum: Union[float, List[float]] = ..., max_momentum: Union[float, List[float]] = ..., div_factor: float = ..., final_div_factor: float = ..., three_phase: bool = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + max_lr: Union[float, List[float]], + total_steps: int = ..., + epochs: int = ..., + steps_per_epoch: int = ..., + pct_start: float = ..., + anneal_strategy: str = ..., + cycle_momentum: bool = ..., + base_momentum: Union[float, List[float]] = ..., + max_momentum: Union[float, List[float]] = ..., + div_factor: float = ..., + final_div_factor: float = ..., + three_phase: bool = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... class PolynomialLR(LRScheduler): total_iters: int = ... power: float = ... - def __init__(self, optimizer: Optimizer, total_iters: int = ..., power: float = ..., last_epoch: int = ..., verbose: bool = ...) -> None: ... + def __init__( + self, + optimizer: Optimizer, + total_iters: int = ..., + power: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... diff --git a/torch/optim/nadam.pyi b/torch/optim/nadam.pyi index a9a23e3143a8..41eb4366d689 100644 --- a/torch/optim/nadam.pyi +++ b/torch/optim/nadam.pyi @@ -1,5 +1,14 @@ from typing import Tuple + from .optimizer import _params_t, Optimizer class NAdam(Optimizer): - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=..., weight_decay: float=..., momentum_decay: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + momentum_decay: float = ..., + ) -> None: ... diff --git a/torch/optim/optimizer.pyi b/torch/optim/optimizer.pyi index 47fdcce7e3db..a489f8c296aa 100644 --- a/torch/optim/optimizer.pyi +++ b/torch/optim/optimizer.pyi @@ -1,11 +1,11 @@ -from typing import Iterable, Union, Callable, Optional, List, Dict, Any -from .. import Tensor +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +from torch import Tensor from torch.utils.hooks import RemovableHandle _params_t = Union[Iterable[Tensor], Iterable[Dict[str, Any]]] def register_optimizer_step_pre_hook(hook: Callable[..., None]) -> RemovableHandle: ... - def register_optimizer_step_post_hook(hook: Callable[..., None]) -> RemovableHandle: ... class Optimizer: @@ -19,6 +19,6 @@ class Optimizer: def register_step_post_hook(self, hook: Callable[..., None]) -> RemovableHandle: ... 
def state_dict(self) -> Dict[str, Any]: ... def load_state_dict(self, state_dict: Dict[str, Any]) -> None: ... - def zero_grad(self, set_to_none: bool=...) -> None: ... - def step(self, closure: Optional[Callable[[], float]]=...) -> Optional[float]: ... + def zero_grad(self, set_to_none: bool = ...) -> None: ... + def step(self, closure: Optional[Callable[[], float]] = ...) -> Optional[float]: ... def add_param_group(self, param_group: Dict[str, Any]) -> None: ... diff --git a/torch/optim/radam.pyi b/torch/optim/radam.pyi index 1bc77ced060f..18ec03c4812e 100644 --- a/torch/optim/radam.pyi +++ b/torch/optim/radam.pyi @@ -1,5 +1,13 @@ from typing import Tuple + from .optimizer import _params_t, Optimizer class RAdam(Optimizer): - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=..., weight_decay: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + ) -> None: ... diff --git a/torch/optim/rmsprop.pyi b/torch/optim/rmsprop.pyi index 6f7f9166160f..d37b2f16b8fd 100644 --- a/torch/optim/rmsprop.pyi +++ b/torch/optim/rmsprop.pyi @@ -1,5 +1,13 @@ -from typing import Tuple from .optimizer import _params_t, Optimizer class RMSprop(Optimizer): - def __init__(self, params: _params_t, lr: float=..., alpha: float=..., eps: float=..., weight_decay: float=..., momentum: float=..., centered: bool=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + alpha: float = ..., + eps: float = ..., + weight_decay: float = ..., + momentum: float = ..., + centered: bool = ..., + ) -> None: ... diff --git a/torch/optim/rprop.pyi b/torch/optim/rprop.pyi index ddc2e60b5ea1..af5e07b3cbc6 100644 --- a/torch/optim/rprop.pyi +++ b/torch/optim/rprop.pyi @@ -1,5 +1,12 @@ from typing import Tuple + from .optimizer import _params_t, Optimizer class Rprop(Optimizer): - def __init__(self, params: _params_t, lr: float=..., etas: Tuple[float, float]=..., step_sizes: Tuple[float, float]=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + etas: Tuple[float, float] = ..., + step_sizes: Tuple[float, float] = ..., + ) -> None: ... diff --git a/torch/optim/sgd.pyi b/torch/optim/sgd.pyi index 7675449d477b..6f8db42d3e17 100644 --- a/torch/optim/sgd.pyi +++ b/torch/optim/sgd.pyi @@ -1,4 +1,12 @@ from .optimizer import _params_t, Optimizer class SGD(Optimizer): - def __init__(self, params: _params_t, lr: float, momentum: float=..., dampening: float=..., weight_decay:float=..., nesterov:bool=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float, + momentum: float = ..., + dampening: float = ..., + weight_decay: float = ..., + nesterov: bool = ..., + ) -> None: ... diff --git a/torch/optim/sparse_adam.pyi b/torch/optim/sparse_adam.pyi index bc8e47756442..f93cb5b01c20 100644 --- a/torch/optim/sparse_adam.pyi +++ b/torch/optim/sparse_adam.pyi @@ -1,6 +1,12 @@ - from typing import Tuple + from .optimizer import _params_t, Optimizer class SparseAdam(Optimizer): - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=...) -> None: ... + def __init__( + self, + params: _params_t, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + ) -> None: ... 
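
Not part of the patch: a sketch exercising the optimizer and LR-scheduler constructor signatures typed above, mainly to show that `foreach`, `maximize`, `capturable`, `differentiable`, and `fused` are keyword-only (they sit after the bare `*` in the `Adam`/`AdamW` stubs). The model and hyperparameters are arbitrary assumptions.

```python
# Illustrative only -- matches the stubbed constructors and Optimizer methods above.
import torch
from torch import nn
from torch.optim import AdamW
from torch.optim.lr_scheduler import StepLR

model = nn.Linear(10, 2)
opt = AdamW(
    model.parameters(),
    lr=1e-3,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    foreach=None,           # keyword-only per the stub
)
sched = StepLR(opt, step_size=30, gamma=0.1)

for _ in range(3):
    opt.zero_grad(set_to_none=True)
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    opt.step()
    sched.step()
```
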
diff --git a/torch/optim/swa_utils.pyi b/torch/optim/swa_utils.pyi index 7e252552df33..0edb95a72965 100644 --- a/torch/optim/swa_utils.pyi +++ b/torch/optim/swa_utils.pyi @@ -1,17 +1,31 @@ -from .optimizer import Optimizer -from ..nn.modules import Module +from typing import Any, Callable, Iterable, Union + +from torch import device, Tensor +from torch.nn.modules import Module from .lr_scheduler import _LRScheduler -from .. import device, Tensor -from typing import Iterable, Any, Optional, Callable, Union, List +from .optimizer import Optimizer class AveragedModel(Module): - def __init__(self, model: Module, device: Union[int, device]=..., - avg_fn: Callable[[Tensor, Tensor, int], Tensor]=...) -> None:... + def __init__( + self, + model: Module, + device: Union[int, device] = ..., + avg_fn: Callable[[Tensor, Tensor, int], Tensor] = ..., + ) -> None: ... + def update_parameters(self, model: Module) -> None: ... - def update_parameters(self, model: Module) -> None:... - -def update_bn(loader: Iterable[Any], model: Module, device: Union[int, device]=...) -> None:... +def update_bn( + loader: Iterable[Any], + model: Module, + device: Union[int, device] = ..., +) -> None: ... class SWALR(_LRScheduler): - def __init__(self, optimizer: Optimizer, swa_lr: float, anneal_epochs: int, - anneal_strategy: str, last_epoch: int=...) -> None:... + def __init__( + self, + optimizer: Optimizer, + swa_lr: float, + anneal_epochs: int, + anneal_strategy: str, + last_epoch: int = ..., + ) -> None: ... diff --git a/torch/utils/data/datapipes/datapipe.pyi.in b/torch/utils/data/datapipes/datapipe.pyi.in index 46fb27a70337..9812f7d5495a 100644 --- a/torch/utils/data/datapipes/datapipe.pyi.in +++ b/torch/utils/data/datapipes/datapipe.pyi.in @@ -3,16 +3,16 @@ # Note that, for mypy, .pyi file takes precedent over .py file, such that we must define the interface for other # classes/objects here, even though we are not injecting extra code into them at the moment. -from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta +from typing import Any, Callable, Dict, Generic, Iterator, Optional, TypeVar + +from torch.utils.data import Dataset, default_collate, IterableDataset from torch.utils.data.datapipes._hook_iterator import _SnapshotState -from typing import Any, Callable, Dict, Generic, Iterator, List, Optional, TypeVar, Union -from torch.utils.data import Dataset, IterableDataset, default_collate +from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta -T_co = TypeVar('T_co', covariant=True) -T = TypeVar('T') +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") UNTRACABLE_DATAFRAME_PIPES: Any - class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): functions: Dict[str, Callable] = ... reduce_ex_hook: Optional[Callable] = ... @@ -23,7 +23,11 @@ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): @classmethod def register_function(cls, function_name: Any, function: Any) -> None: ... @classmethod - def register_datapipe_as_function(cls, function_name: Any, cls_to_register: Any): ... + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + ): ... def __getstate__(self): ... def __reduce_ex__(self, *args: Any, **kwargs: Any): ... @classmethod @@ -32,7 +36,6 @@ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... ${MapDataPipeMethods} - class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): functions: Dict[str, Callable] = ... 
reduce_ex_hook: Optional[Callable] = ... @@ -46,7 +49,12 @@ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): @classmethod def register_function(cls, function_name: Any, function: Any) -> None: ... @classmethod - def register_datapipe_as_function(cls, function_name: Any, cls_to_register: Any, enable_df_api_tracing: bool = ...): ... + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + enable_df_api_tracing: bool = ..., + ): ... def __getstate__(self): ... def __reduce_ex__(self, *args: Any, **kwargs: Any): ... @classmethod @@ -55,39 +63,29 @@ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... ${IterDataPipeMethods} - class DFIterDataPipe(IterDataPipe): def _is_dfpipe(self): ... - class _DataPipeSerializationWrapper: def __init__(self, datapipe): ... def __getstate__(self): ... def __setstate__(self, state): ... def __len__(self): ... - class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe): def __iter__(self): ... - class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe): def __getitem__(self, idx): ... - class DataChunk(list, Generic[T]): def __init__(self, items): super().__init__(items) self.items = items - - def as_str(self, indent=''): + def as_str(self, indent: str = "") -> str: res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]" return res - def __iter__(self) -> Iterator[T]: - for i in super().__iter__(): - yield i - + yield from super().__iter__() def raw_iterator(self) -> T: # type: ignore[misc] - for i in self.items: - yield i + yield from self.items
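
Not part of the patch: a tiny check that the `DataChunk` rewrite above (``yield from`` replacing the explicit loops, and the annotated `as_str`) is behavior-preserving. Assumes `DataChunk` is importable from the module path shown in the hunk; the sample items are arbitrary.

```python
# Illustrative only -- the `yield from` form iterates exactly like the old loops.
from torch.utils.data.datapipes.datapipe import DataChunk

chunk = DataChunk([1, 2, 3])
assert list(iter(chunk)) == [1, 2, 3]           # __iter__: yield from super().__iter__()
assert list(chunk.raw_iterator()) == [1, 2, 3]  # raw_iterator: yield from self.items
assert chunk.as_str(indent="  ") == "  [1, 2, 3]"
```
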