pytorch/test/allowlist_for_publicAPI.json
Latest commit: 45411d1fc9 by Xuehai Pan: Use absolute path `path.resolve()` -> `path.absolute()` (#129409)
Changes:

1. Always call `.absolute()` explicitly: `Path(__file__)` -> `Path(__file__).absolute()`
2. Replace `path.resolve()` with `path.absolute()` wherever the code is resolving the PyTorch repo root directory (see the sketch below the commit details).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129409
Approved by: https://github.com/albanD
2025-01-03 20:03:40 +00:00
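
For context, a minimal sketch of the distinction the commit relies on (plain `pathlib`, with a hypothetical path; this is not code from the PR): `Path.absolute()` only anchors a relative path to the current working directory, while `Path.resolve()` additionally follows symlinks and collapses `..` components, which can shift the computed repo root out of a symlinked checkout.

```python
from pathlib import Path

# Hypothetical relative path, used only for illustration.
p = Path("test/allowlist_for_publicAPI.json")

# .absolute() prepends the current working directory and nothing more:
# symlinks and ".." components are left untouched.
print(p.absolute())

# .resolve() also follows symlinks and collapses ".." components, so in a
# symlinked checkout it can point at the symlink target rather than the
# checkout itself.
print(p.resolve())

# The pattern the commit standardizes on: derive the repo root from the
# current file explicitly, without resolving symlinks. (".parent.parent"
# assumes the file lives one directory below the repo root.)
REPO_ROOT = Path(__file__).absolute().parent.parent
```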

{
"being_migrated": {
"torch.nn.intrinsic": "torch.ao.nn.intrinsic",
"torch.nn.intrinsic.modules": "torch.ao.nn.intrinsic.modules",
"torch.nn.intrinsic.modules.fused": "torch.ao.nn.intrinsic.modules.fused",
"torch.nn.intrinsic.qat": "torch.ao.nn.intrinsic.qat",
"torch.nn.intrinsic.qat.modules": "torch.ao.nn.intrinsic.qat.modules",
"torch.nn.intrinsic.qat.modules.conv_fused": "torch.ao.nn.intrinsic.qat.modules.conv_fused",
"torch.nn.intrinsic.qat.modules.linear_fused": "torch.ao.nn.intrinsic.qat.modules.linear_fused",
"torch.nn.intrinsic.qat.modules.linear_relu": "torch.ao.nn.intrinsic.qat.modules.linear_relu",
"torch.nn.intrinsic.quantized": "torch.ao.nn.intrinsic.quantized",
"torch.nn.intrinsic.quantized.modules": "torch.ao.nn.intrinsic.quantized.modules",
"torch.nn.intrinsic.quantized.modules.bn_relu": "torch.ao.nn.intrinsic.quantized.modules.bn_relu",
"torch.nn.intrinsic.quantized.modules.conv_relu": "torch.ao.nn.intrinsic.quantized.modules.conv_relu",
"torch.nn.intrinsic.quantized.modules.linear_relu": "torch.ao.nn.intrinsic.quantized.modules.linear_relu",
"torch.nn.intrinsic.quantized.dynamic": "torch.ao.nn.intrinsic.quantized.dynamic",
"torch.nn.intrinsic.quantized.dynamic.modules": "torch.ao.nn.intrinsic.quantized.dynamic.modules",
"torch.nn.intrinsic.quantized.dynamic.modules.linear_relu": "torch.ao.nn.intrinsic.quantized.dynamic.modules.linear_relu",
"torch.nn.qat": "torch.ao.nn.qat",
"torch.nn.qat.dynamic": "torch.ao.nn.qat.dynamic",
"torch.nn.qat.dynamic.modules": "torch.ao.nn.qat.dynamic.modules",
"torch.nn.qat.dynamic.modules.linear": "torch.ao.nn.qat.dynamic.modules.linear",
"torch.nn.qat.modules": "torch.ao.nn.qat.modules",
"torch.nn.qat.modules.conv": "torch.ao.nn.qat.modules.conv",
"torch.nn.qat.modules.embedding_ops": "torch.ao.nn.qat.modules.embedding_ops",
"torch.nn.qat.modules.linear": "torch.ao.nn.qat.modules.linear",
"torch.nn.quantized.functional": "torch.ao.nn.quantized.functional",
"torch.nn.quantized": "torch.ao.nn.quantized",
"torch.nn.quantized.modules": "torch.ao.nn.quantized.modules",
"torch.nn.quantized.dynamic": "torch.ao.nn.quantized.dynamic",
"torch.nn.quantized.dynamic.modules": "torch.ao.nn.quantized.dynamic.modules",
"torch.nn.quantized.dynamic.modules.rnn": "torch.ao.nn.quantized.dynamic.modules.rnn",
"torch.nn.quantizable": "torch.ao.nn.quantizable",
"torch.nn.quantizable.modules": "torch.ao.nn.quantizable.modules",
"torch.nn.quantizable.modules.activation": "torch.ao.nn.quantizable.modules.activation",
"torch.nn.quantizable.modules.rnn": "torch.ao.nn.quantizable.modules.rnn",
"torch.distributed.tensor.device_mesh": "torch.distributed.device_mesh"
},
"torch.backends": [
"contextmanager"
],
"torch.cuda.comm": [
"broadcast",
"broadcast_coalesced",
"reduce_add",
"reduce_add_coalesced",
"scatter",
"gather"
],
"torch.csrc.jit.tensorexpr.scripts.bisect": [
"bisect"
],
"torch.cuda.nccl": [
"init_rank",
"is_available",
"unique_id",
"version"
],
"torch.distributed": [
"AllToAllOptions",
"AllreduceCoalescedOptions",
"AllreduceOptions",
"BarrierOptions",
"BroadcastOptions",
"BuiltinCommHookType",
"Callable",
"DebugLevel",
"Dict",
"Enum",
"FileStore",
"GatherOptions",
"GradBucket",
"HashStore",
"Logger",
"namedtuple",
"Optional",
"PrefixStore",
"ProcessGroup",
"ProcessGroupGloo",
"ReduceOp",
"ReduceOptions",
"ReduceScatterOptions",
"Reducer",
"ScatterOptions",
"Store",
"TCPStore",
"Tuple",
"Union",
"get_debug_level",
"set_debug_level",
"set_debug_level_from_env",
"timedelta",
"ProcessGroupMPI",
"ProcessGroupNCCL"
],
"torch.distributed.checkpoint.state_dict": [
"Any",
"Callable",
"DDP",
"DTensor",
"Dict",
"DictValueType",
"FQNS_T",
"FSDP",
"FullOptimStateDictConfig",
"FullStateDictConfig",
"Iterable",
"List",
"ListDictValueType",
"OptimStateDictConfig",
"OptimizerStateType",
"Optional",
"PrimitiveType",
"Set",
"ShardedOptimStateDictConfig",
"ShardedStateDictConfig",
"ShardedTensor",
"StateDictConfig",
"StateDictType",
"Tuple",
"Union",
"ValueType",
"asdict",
"cast",
"chain",
"dataclass",
"field",
"no_type_check"
],
"torch.distributed.autograd": [
"DistAutogradContext",
"backward",
"get_gradients"
],
"torch.distributed.elastic.events": [
"Dict",
"Enum",
"EventMetadataValue",
"Optional"
],
"torch.distributed.elastic.events.handlers": [
"Dict",
"Optional",
"ScubaLogHandler",
"ScubaRdzvLogHandler"
],
"torch.distributed.elastic.metrics": [
"Optional",
"get_logger",
"TorchElasticService"
],
"torch.distributed.elastic.multiprocessing": [
"Callable",
"Dict",
"Tuple",
"Union",
"get_logger"
],
"torch.distributed.elastic.multiprocessing.redirects": [
"contextmanager",
"partial",
"redirect_stderr",
"redirect_stdout"
],
"torch.distributed.elastic.rendezvous": [
"RendezvousHandlerCreator"
],
"torch.distributed.elastic.rendezvous.api": [
"ABC",
"Any",
"Callable",
"Dict",
"Optional",
"RendezvousHandlerCreator",
"Store",
"Tuple",
"abstractmethod"
],
"torch.distributed.elastic.rendezvous.dynamic_rendezvous": [
"get_method_name"
],
"torch.distributed.elastic.utils.api": [
"Any",
"List",
"Template"
],
"torch.distributed.elastic.utils.data.elastic_distributed_sampler": [
"DistributedSampler"
],
"torch.distributed.elastic.utils.logging": [
"Optional",
"get_log_level"
],
"torch.distributed.elastic.utils.store": [
"List",
"timedelta"
],
"torch.distributed.nn": [
"Function",
"ReduceOp",
"group"
],
"torch.distributed.nn.functional": [
"Function",
"ReduceOp",
"group"
],
"torch.distributed.nn.jit.instantiator": [
"Optional",
"get_remote_module_template"
],
"torch.distributed.optim.utils": [
"Type"
],
"torch.distributed.remote_device": [
"Optional",
"Union"
],
"torch.distributed.rendezvous": [
"Dict",
"FileStore",
"Iterable",
"Optional",
"PrefixStore",
"Store",
"TCPStore",
"Tuple",
"Union",
"cast",
"timedelta",
"urlparse",
"urlunparse"
],
"torch.distributed.rpc": [],
"torch.fft": [
"Tensor",
"fft",
"fft2",
"fftfreq",
"fftn",
"fftshift",
"hfft",
"ifft",
"ifft2",
"ifftn",
"ifftshift",
"ihfft",
"irfft",
"irfft2",
"irfftn",
"rfft",
"rfft2",
"rfftfreq",
"rfftn"
],
"torch.functional": [
"istft",
"pca_lowrank",
"svd_lowrank"
],
"torch.futures": [
"Future"
],
"torch.fx": [
"PH",
"ProxyableClassMeta",
"CodeGen",
"Tracer",
"symbolic_trace",
"wrap"
],
"torch.fx.experimental.migrate_gradual_types.z3_types": [
"D"
],
"torch.fx.experimental.unification.core": [
"Iterator",
"assoc",
"dispatch",
"isvar",
"partial",
"unify",
"walk"
],
"torch.fx.experimental.unification.dispatch": [
"dispatch",
"partial"
],
"torch.fx.experimental.unification.more": [
"dispatch",
"reify",
"unify"
],
"torch.fx.experimental.unification.unification_tools": [
"first",
"getter",
"groupby"
],
"torch.fx.experimental.unification.variable": [
"contextmanager",
"dispatch",
"hashable",
"isvar"
],
"torch.fx.proxy": [
"assert_fn"
],
"torch.hub": [
"HTTPError",
"Path",
"Request",
"tqdm",
"urlopen",
"urlparse"
],
"torch.jit": [
"Attribute",
"Final",
"Iterator",
"ONNXTracedModule",
"RecursiveScriptClass",
"RecursiveScriptModule",
"ScriptModule",
"ScriptWarning",
"TopLevelTracedModule",
"TracedModule",
"TracerWarning",
"TracingCheckError",
"contextmanager",
"export",
"fork",
"freeze",
"fuser",
"ignore",
"interface",
"is_scripting",
"is_tracing",
"jit_module_from_flatbuffer",
"last_executed_optimized_graph",
"load",
"optimize_for_inference",
"optimized_execution",
"run_frozen_optimizations",
"save",
"save_jit_module_to_flatbuffer",
"script",
"script_method",
"set_fusion_strategy",
"set_module",
"trace",
"trace_module",
"unused",
"wait"
],
"torch.jit.annotations": [
"Any",
"AnyType",
"ComplexType",
"Dict",
"DictType",
"EvalEnv",
"FloatType",
"IntType",
"List",
"ListType",
"StringType",
"TensorType",
"Tuple",
"TupleType",
"get_enum_value_type",
"is_dict",
"is_function_or_method",
"is_list",
"is_optional",
"is_tensor",
"is_tuple",
"is_union",
"is_vararg"
],
"torch.jit.frontend": [
"Apply",
"Assert",
"Assign",
"Attribute",
"AugAssign",
"BinOp",
"Break",
"ClassDef",
"Const",
"Continue",
"Decl",
"Def",
"Delete",
"DictComp",
"DictLiteral",
"Dots",
"EmptyTypeAnnotation",
"ExprStmt",
"FalseLiteral",
"For",
"FunctionModifiers",
"Ident",
"If",
"List",
"ListComp",
"ListLiteral",
"NoneLiteral",
"Param",
"Pass",
"Property",
"Raise",
"Return",
"Select",
"SliceExpr",
"Starred",
"Stmt",
"StringLiteral",
"Subscript",
"TernaryIf",
"TrueLiteral",
"Tuple",
"TupleLiteral",
"UnaryOp",
"Var",
"While",
"With",
"WithItem",
"dedent",
"get_qualified_name",
"get_source_lines_and_file",
"is_static_fn",
"make_source_context",
"namedtuple",
"parse_def",
"should_drop",
"monkeytype_trace"
],
"torch.linalg": [
"LinAlgError",
"Tensor",
"cholesky",
"cholesky_ex",
"cond",
"cross",
"det",
"diagonal",
"eig",
"eigh",
"eigvals",
"eigvalsh",
"householder_product",
"inv",
"inv_ex",
"ldl_factor",
"ldl_factor_ex",
"ldl_solve",
"lstsq",
"lu",
"lu_factor",
"lu_factor_ex",
"lu_solve",
"matmul",
"matrix_exp",
"matrix_norm",
"matrix_power",
"matrix_rank",
"multi_dot",
"norm",
"pinv",
"qr",
"slogdet",
"solve",
"solve_ex",
"solve_triangular",
"svd",
"svdvals",
"tensorinv",
"tensorsolve",
"vander",
"vecdot",
"vector_norm"
],
"torch.masked": [
"amax",
"amin",
"argmax",
"argmin",
"as_masked_tensor",
"cumprod",
"cumsum",
"is_masked_tensor",
"log_softmax",
"logaddexp",
"logsumexp",
"masked_tensor",
"MaskedTensor",
"mean",
"median",
"norm",
"normalize",
"prod",
"softmax",
"softmin",
"std",
"sum",
"var"
],
"torch.multiprocessing": [
"Array",
"AuthenticationError",
"Barrier",
"BoundedSemaphore",
"BufferTooShort",
"Condition",
"Event",
"JoinableQueue",
"Lock",
"Manager",
"Pipe",
"Pool",
"Process",
"ProcessContext",
"ProcessError",
"ProcessExitedException",
"ProcessRaisedException",
"Queue",
"RLock",
"RawArray",
"RawValue",
"Semaphore",
"SimpleQueue",
"SpawnContext",
"TimeoutError",
"Value",
"active_children",
"allow_connection_pickling",
"cpu_count",
"current_process",
"freeze_support",
"get_all_start_methods",
"get_context",
"get_logger",
"get_start_method",
"init_reductions",
"log_to_stderr",
"set_executable",
"set_forkserver_preload",
"set_start_method",
"spawn",
"start_processes",
"parent_process"
],
"torch.multiprocessing.reductions": [
"ForkingPickler",
"Union",
"check_serializing_named_tensor",
"register_after_fork"
],
"torch.multiprocessing.spawn": [
"Optional"
],
"torch.nested": [
"nested_tensor",
"to_padded_tensor"
],
"torch.nn.common_types": [
"Optional",
"Tensor",
"Tuple",
"TypeVar",
"Union"
],
"torch.nn.functional": [
"Callable",
"DType",
"List",
"Optional",
"Tensor",
"Tuple",
"Union",
"adaptive_avg_pool1d",
"avg_pool1d",
"avg_pool2d",
"avg_pool3d",
"bilinear",
"boolean_dispatch",
"celu_",
"channel_shuffle",
"conv1d",
"conv2d",
"conv3d",
"conv_tbc",
"conv_transpose1d",
"conv_transpose2d",
"conv_transpose3d",
"cosine_similarity",
"elu_",
"gelu",
"handle_torch_function",
"hardshrink",
"hardtanh_",
"has_torch_function",
"has_torch_function_unary",
"has_torch_function_variadic",
"leaky_relu_",
"linear",
"logsigmoid",
"native_channel_shuffle",
"one_hot",
"pairwise_distance",
"pdist",
"pixel_shuffle",
"pixel_unshuffle",
"prelu",
"relu_",
"rrelu_",
"scaled_dot_product_attention",
"selu_",
"softplus",
"softshrink",
"threshold_"
],
"torch.nn.init": [
"Tensor"
],
"torch.nn.intrinsic.modules": [
"_FusedModule"
],
"torch.nn.modules.linear": [
"NonDynamicallyQuantizableLinear"
],
"torch.nn.modules.rnn": [
"apply_permutation"
],
"torch.nn.parallel": [
"DistributedDataParallelCPU"
],
"torch.nn.parallel.comm": [
"List"
],
"torch.nn.parallel.parallel_apply": [
"ExceptionWrapper",
"autocast"
],
"torch.nn.parallel.replicate": [
"OrderedDict"
],
"torch.nn.parallel.scatter_gather": [
"is_namedtuple"
],
"torch.nn.parameter": [
"OrderedDict"
],
"torch.nn.utils.rnn": [
"bind",
"PackedSequence_"
],
"torch.nn.utils.convert_parameters": [
"Iterable",
"Optional"
],
"torch.onnx": [
"Dict",
"OperatorExportTypes",
"Optional",
"TensorProtoDataType",
"TrainingMode"
],
"torch.overrides": [
"BaseTorchFunctionMode",
"TorchFunctionMode",
"TorchFunctionModeMeta",
"enable_torch_function_mode",
"get_default_nowrap_functions",
"has_torch_function"
],
"torch.package.analyze.is_from_package": [
"Any",
"ModuleType",
"is_mangled"
],
"torch.package.find_file_dependencies": [
"List",
"Optional",
"Tuple"
],
"torch.package.glob_group": [
"GlobPattern",
"Iterable",
"Union"
],
"torch.profiler": [
"DeviceType",
"ProfilerActivity",
"kineto_available",
"record_function"
],
"torch.quantization": [
"ABC",
"DeQuantStub",
"FakeQuantize",
"FakeQuantizeBase",
"FixedQParamsFakeQuantize",
"FusedMovingAvgObsFakeQuantize",
"HistogramObserver",
"MinMaxObserver",
"MovingAverageMinMaxObserver",
"MovingAveragePerChannelMinMaxObserver",
"NoopObserver",
"ObserverBase",
"PerChannelMinMaxObserver",
"PlaceholderObserver",
"QConfig",
"QConfigAny",
"QConfigDynamic",
"QuantStub",
"QuantType",
"QuantWrapper",
"RecordingObserver",
"_add_module_to_qconfig_obs_ctr",
"add_quant_dequant",
"_assert_valid_qconfig",
"convert",
"convert_dynamic_jit",
"convert_jit",
"default_fixed_qparams_range_0to1_fake_quant",
"default_affine_fixed_qparams_fake_quant",
"default_debug_observer",
"default_dynamic_quant_observer",
"default_fake_quant",
"default_float_qparams_observer",
"default_fused_act_fake_quant",
"default_fused_per_channel_wt_fake_quant",
"default_fused_wt_fake_quant",
"default_histogram_fake_quant",
"default_histogram_observer",
"default_observer",
"default_per_channel_weight_fake_quant",
"default_per_channel_weight_observer",
"default_placeholder_observer",
"default_fixed_qparams_range_neg1to1_fake_quant",
"default_symmetric_fixed_qparams_fake_quant",
"default_weight_fake_quant",
"default_weight_observer",
"disable_fake_quant",
"disable_observer",
"enable_fake_quant",
"enable_observer",
"fuse_conv_bn",
"fuse_conv_bn_jit",
"fuse_conv_bn_relu",
"fuse_linear_bn",
"fuse_modules",
"get_default_compare_output_module_list",
"get_default_dynamic_quant_module_mappings",
"get_default_float_to_quantized_operator_mappings",
"get_default_qat_module_mappings",
"get_default_qat_qconfig",
"get_default_qconfig",
"get_default_qconfig_propagation_list",
"get_default_static_quant_module_mappings",
"get_dynamic_quant_module_class",
"get_fuser_method",
"get_observer_state_dict",
"get_quantized_operator",
"get_static_quant_module_class",
"load_observer_state_dict",
"no_observer_set",
"prepare",
"prepare_dynamic_jit",
"prepare_jit",
"prepare_qat",
"propagate_qconfig_",
"qconfig_equals",
"_get_quant_type_to_str",
"quantize",
"quantize_dynamic",
"quantize_dynamic_jit",
"quantize_jit",
"quantize_qat",
"script_qconfig",
"script_qconfig_dict",
"swap_module"
],
"torch.quantization.fake_quantize": [
"FakeQuantize",
"FakeQuantizeBase",
"FixedQParamsFakeQuantize",
"FusedMovingAvgObsFakeQuantize",
"default_fixed_qparams_range_0to1_fake_quant",
"default_affine_fixed_qparams_fake_quant",
"default_fake_quant",
"default_fused_act_fake_quant",
"default_fused_per_channel_wt_fake_quant",
"default_fused_wt_fake_quant",
"default_histogram_fake_quant",
"default_per_channel_weight_fake_quant",
"default_fixed_qparams_range_neg1to1_fake_quant",
"default_symmetric_fixed_qparams_fake_quant",
"default_weight_fake_quant",
"disable_fake_quant",
"disable_observer",
"enable_fake_quant",
"enable_observer"
],
"torch.quantization.fuse_modules": [
"fuse_conv_bn",
"fuse_conv_bn_relu",
"fuse_known_modules",
"fuse_modules",
"get_fuser_method"
],
"torch.quantization.fuser_method_mappings": [
"fuse_conv_bn",
"fuse_conv_bn_relu",
"fuse_linear_bn",
"get_fuser_method"
],
"torch.quantization.observer": [
"ABC",
"HistogramObserver",
"MinMaxObserver",
"MovingAverageMinMaxObserver",
"MovingAveragePerChannelMinMaxObserver",
"NoopObserver",
"ObserverBase",
"PerChannelMinMaxObserver",
"PlaceholderObserver",
"RecordingObserver",
"default_debug_observer",
"default_dynamic_quant_observer",
"default_float_qparams_observer",
"default_histogram_observer",
"default_observer",
"default_per_channel_weight_observer",
"default_placeholder_observer",
"default_weight_observer",
"get_observer_state_dict",
"load_observer_state_dict"
],
"torch.quantization.qconfig": [
"QConfig",
"QConfigAny",
"QConfigDynamic",
"_add_module_to_qconfig_obs_ctr",
"_assert_valid_qconfig",
"get_default_qat_qconfig",
"get_default_qconfig",
"qconfig_equals"
],
"torch.quantization.quant_type": [
"QuantType",
"_get_quant_type_to_str"
],
"torch.quantization.quantization_mappings": [
"get_default_compare_output_module_list",
"get_default_dynamic_quant_module_mappings",
"get_default_float_to_quantized_operator_mappings",
"get_default_qat_module_mappings",
"get_default_qconfig_propagation_list",
"get_default_static_quant_module_mappings",
"get_dynamic_quant_module_class",
"get_quantized_operator",
"get_static_quant_module_class",
"no_observer_set"
],
"torch.quantization.quantize": [
"add_quant_dequant",
"convert",
"prepare",
"prepare_qat",
"propagate_qconfig_",
"quantize",
"quantize_dynamic",
"quantize_qat",
"swap_module"
],
"torch.quantization.quantize_jit": [
"convert_dynamic_jit",
"convert_jit",
"fuse_conv_bn_jit",
"prepare_dynamic_jit",
"prepare_jit",
"quantize_dynamic_jit",
"quantize_jit",
"script_qconfig",
"script_qconfig_dict"
],
"torch.quantization.stubs": [
"DeQuantStub",
"QuantStub",
"QuantWrapper"
],
"torch.quasirandom": [
"Optional"
],
"torch.random": [
"Generator"
],
"torch.serialization": [
"Any",
"BinaryIO",
"Dict",
"IO",
"Optional",
"Storage",
"Tuple",
"Type",
"Union",
"cast",
"closing",
"contextmanager",
"get_source_lines_and_file",
"get_default_mmap_options",
"set_default_mmap_options"
],
"torch.sparse": [
"BFloat16Tensor",
"ByteTensor",
"CharTensor",
"DoubleTensor",
"FloatTensor",
"HalfTensor",
"IntTensor",
"LongTensor",
"ShortTensor",
"addmm",
"log_softmax",
"mm",
"softmax"
],
"torch.special": [
"airy_ai",
"bessel_j0",
"bessel_j1",
"bessel_y0",
"bessel_y1",
"chebyshev_polynomial_t",
"chebyshev_polynomial_u",
"chebyshev_polynomial_v",
"chebyshev_polynomial_w",
"digamma",
"entr",
"erf",
"erfc",
"erfcx",
"erfinv",
"exp2",
"expit",
"expm1",
"gammainc",
"gammaincc",
"gammaln",
"hermite_polynomial_h",
"hermite_polynomial_he",
"i0",
"i0e",
"i1",
"i1e",
"laguerre_polynomial_l",
"legendre_polynomial_p",
"log1p",
"log_ndtr",
"log_softmax",
"logit",
"logsumexp",
"modified_bessel_i0",
"modified_bessel_i1",
"modified_bessel_k0",
"modified_bessel_k1",
"multigammaln",
"ndtr",
"ndtri",
"polygamma",
"psi",
"round",
"scaled_modified_bessel_k0",
"scaled_modified_bessel_k1",
"shifted_chebyshev_polynomial_t",
"shifted_chebyshev_polynomial_u",
"shifted_chebyshev_polynomial_v",
"shifted_chebyshev_polynomial_w",
"sinc",
"softmax",
"spherical_bessel_j0",
"xlog1py",
"xlogy",
"zeta"
],
"torch.storage": [
"Any",
"Storage",
"Type",
"TypeVar",
"Union",
"cast",
"lru_cache"
],
"torch.testing": [
"FileCheck",
"all_types",
"all_types_and",
"all_types_and_complex",
"all_types_and_complex_and",
"all_types_and_half",
"assert_allclose",
"assert_close",
"complex_types",
"double_types",
"empty_types",
"floating_and_complex_types",
"floating_and_complex_types_and",
"floating_types",
"floating_types_and",
"floating_types_and_half",
"get_all_complex_dtypes",
"get_all_device_types",
"get_all_dtypes",
"get_all_fp_dtypes",
"get_all_int_dtypes",
"get_all_math_dtypes",
"integral_types",
"integral_types_and",
"make_non_contiguous",
"make_tensor",
"rand",
"randn"
],
"torch.types": [
"Any",
"Device",
"List",
"Number",
"Sequence",
"Tuple",
"Union"
],
"torch.utils.benchmark.utils.compare": [
"Colorize",
"Table",
"optional_min"
],
"torch.utils.benchmark.utils.cpp_jit": [
"Any",
"CallgrindModuleType",
"List",
"Optional",
"TimeitModuleType"
],
"torch.utils.benchmark.utils.fuzzer": [
"dtype_size",
"prod"
],
"torch.utils.benchmark.utils.sparse_fuzzer": [
"FuzzedTensor",
"Number",
"Optional",
"Tuple",
"Union"
],
"torch.utils.benchmark.utils.timer": [
"CPPTimer",
"timer"
],
"torch.utils.benchmark.utils.valgrind_wrapper.timer_interface": [
"GlobalsBridge",
"Serialization",
"wrapper_singleton"
],
"torch.utils.data": [
"_DatasetKind",
"argument_validation",
"default_collate",
"default_convert",
"functional_datapipe",
"get_worker_info",
"guaranteed_datapipes_determinism",
"non_deterministic",
"runtime_validation",
"runtime_validation_disabled"
],
"torch.utils.data.dataloader": [
"default_collate",
"default_convert",
"get_worker_info"
],
"torch.utils.data.datapipes.dataframe": [
"DFIterDataPipe"
],
"torch.utils.dlpack": [
"Any",
"to_dlpack"
],
"torch": [
"BFloat16Storage",
"BFloat16Tensor",
"ComplexDoubleStorage",
"ComplexFloatStorage",
"DisableTorchFunction",
"DisableTorchFunctionSubclass",
"Generator",
"HalfStorage",
"HalfTensor",
"QInt32Storage",
"QInt8Storage",
"QUInt2x4Storage",
"QUInt4x2Storage",
"QUInt8Storage",
"Storage",
"TypedStorage",
"_adaptive_avg_pool2d",
"_adaptive_avg_pool3d",
"_add_batch_dim",
"_add_relu",
"_add_relu_",
"_addmm_activation",
"_aminmax",
"_amp_foreach_non_finite_check_and_unscale_",
"_amp_update_scale_",
"_assert_async",
"_batch_norm_impl_index",
"_cast_Byte",
"_cast_Char",
"_cast_Double",
"_cast_Float",
"_cast_Half",
"_cast_Int",
"_cast_Long",
"_cast_Short",
"_choose_qparams_per_tensor",
"_coalesce",
"_compute_linear_combination",
"_conj",
"_conj_copy",
"_conj_physical",
"_convert_indices_from_coo_to_csr",
"_convert_indices_from_csr_to_coo",
"_convolution",
"_convolution_mode",
"_copy_from",
"_copy_from_and_resize",
"_ctc_loss",
"_cudnn_ctc_loss",
"_cudnn_init_dropout_state",
"_cudnn_rnn",
"_cudnn_rnn_flatten_weight",
"_cufft_clear_plan_cache",
"_cufft_get_plan_cache_max_size",
"_cufft_get_plan_cache_size",
"_cufft_set_plan_cache_max_size",
"_cummax_helper",
"_cummin_helper",
"_debug_has_internal_overlap",
"_det_lu_based_helper_backward_helper",
"_dim_arange",
"_dirichlet_grad",
"_disable_functionalization",
"_efficientzerotensor",
"_embedding_bag",
"_embedding_bag_forward_only",
"_empty_affine_quantized",
"_empty_per_channel_affine_quantized",
"_enable_functionalization",
"_euclidean_dist",
"_fake_quantize_learnable_per_channel_affine",
"_fake_quantize_learnable_per_tensor_affine",
"_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
"_fft_c2c",
"_fft_c2r",
"_fft_r2c",
"_foreach_abs",
"_foreach_abs_",
"_foreach_acos",
"_foreach_acos_",
"_foreach_add",
"_foreach_add_",
"_foreach_addcdiv",
"_foreach_addcdiv_",
"_foreach_addcmul",
"_foreach_addcmul_",
"_foreach_asin",
"_foreach_asin_",
"_foreach_atan",
"_foreach_atan_",
"_foreach_ceil",
"_foreach_ceil_",
"_foreach_cos",
"_foreach_cos_",
"_foreach_cosh",
"_foreach_cosh_",
"_foreach_div",
"_foreach_div_",
"_foreach_erf",
"_foreach_erf_",
"_foreach_erfc",
"_foreach_erfc_",
"_foreach_exp",
"_foreach_exp_",
"_foreach_expm1",
"_foreach_expm1_",
"_foreach_floor",
"_foreach_floor_",
"_foreach_frac",
"_foreach_frac_",
"_foreach_lgamma",
"_foreach_lgamma_",
"_foreach_log",
"_foreach_log10",
"_foreach_log10_",
"_foreach_log1p",
"_foreach_log1p_",
"_foreach_log2",
"_foreach_log2_",
"_foreach_log_",
"_foreach_maximum",
"_foreach_minimum",
"_foreach_mul",
"_foreach_mul_",
"_foreach_neg",
"_foreach_neg_",
"_foreach_norm",
"_foreach_reciprocal",
"_foreach_reciprocal_",
"_foreach_round",
"_foreach_round_",
"_foreach_sigmoid",
"_foreach_sigmoid_",
"_foreach_sign",
"_foreach_sign_",
"_foreach_sin",
"_foreach_sin_",
"_foreach_sinh",
"_foreach_sinh_",
"_foreach_sqrt",
"_foreach_sqrt_",
"_foreach_sub",
"_foreach_sub_",
"_foreach_tan",
"_foreach_tan_",
"_foreach_tanh",
"_foreach_tanh_",
"_foreach_trunc",
"_foreach_trunc_",
"_foreach_zero_",
"_from_functional_tensor",
"_fused_dropout",
"_fused_moving_avg_obs_fq_helper",
"_fw_primal_copy",
"_grid_sampler_2d_cpu_fallback",
"_has_compatible_shallow_copy_type",
"_histogramdd_bin_edges",
"_histogramdd_from_bin_cts",
"_histogramdd_from_bin_tensors",
"_index_put_impl_",
"_indices_copy",
"_is_functional_tensor",
"_is_zerotensor",
"_linalg_check_errors",
"_linalg_qr_helper",
"_linalg_svd",
"_linalg_solve_ex",
"_log_softmax",
"_log_softmax_backward_data",
"_logcumsumexp",
"_lu_with_info",
"_make_dual",
"_make_dual_copy",
"_make_per_channel_quantized_tensor",
"_make_per_tensor_quantized_tensor",
"_masked_scale",
"_masked_softmax",
"_mkldnn_reshape",
"_mkldnn_transpose",
"_mkldnn_transpose_",
"_neg_view",
"_neg_view_copy",
"_nested_from_padded",
"_nested_from_padded_and_nested_example",
"_nnpack_available",
"_nnpack_spatial_convolution",
"_pack_padded_sequence",
"_pad_packed_sequence",
"_pin_memory",
"_remove_batch_dim",
"_reshape_alias_copy",
"_reshape_from_tensor",
"_rowwise_prune",
"_sample_dirichlet",
"_saturate_weight_to_fp16",
"_shape_as_tensor",
"_sobol_engine_draw",
"_sobol_engine_ff_",
"_sobol_engine_initialize_state_",
"_sobol_engine_scramble_",
"_softmax",
"_softmax_backward_data",
"_sparse_broadcast_to",
"_sparse_broadcast_to_copy",
"_sparse_coo_tensor_unsafe",
"_sparse_csr_prod",
"_sparse_csr_sum",
"_sparse_csr_tensor_unsafe",
"_sparse_log_softmax_backward_data",
"_sparse_softmax_backward_data",
"_sparse_sparse_matmul",
"_sparse_sum",
"_stack",
"_standard_gamma",
"_standard_gamma_grad",
"_sync",
"_test_serialization_subcmul",
"_to_cpu",
"_to_functional_tensor",
"_torch_cuda_cu_linker_symbol_op",
"_trilinear",
"_unique",
"_unique2",
"_unpack_dual",
"_use_cudnn_ctc_loss",
"_use_cudnn_rnn_flatten_weight",
"_validate_sparse_compressed_tensor_args",
"_validate_sparse_coo_tensor_args",
"_validate_sparse_csr_tensor_args",
"_values_copy",
"_weight_norm",
"_weight_norm_interface",
"autocast",
"broadcast_shapes",
"compiled_with_cxx11_abi",
"from_dlpack",
"lobpcg",
"lu",
"segment_reduce",
"set_default_dtype",
"set_grad_enabled",
"set_printoptions",
"unique"
],
"torch.ao.ns.fx.graph_matcher": [
"Any",
"Dict",
"FakeQuantizeBase",
"Graph",
"GraphModule",
"List",
"NSNodeTargetType",
"NSSubgraph",
"Node",
"ObserverBase",
"Optional",
"Set",
"Tuple",
"end_node_matches_reversed_fusion",
"get_base_name_to_sets_of_related_ops",
"get_reversed_fusions",
"get_type_a_related_to_b",
"get_unmatchable_types_map",
"getattr_from_fqn"
],
"torch.ao.ns.fx.graph_passes": [
"Any",
"Callable",
"Dict",
"Graph",
"GraphModule",
"List",
"NSNodeTargetType",
"NSSingleResultValuesType",
"NSSubgraph",
"Node",
"NodeInputOrOutputType",
"Optional",
"Set",
"Tuple",
"Union",
"get_arg_indices_of_inputs_to_log",
"get_new_attr_name_with_prefix",
"get_node_first_input_and_output_type",
"get_node_input_qparams",
"get_node_type_to_io_type_map",
"get_normalized_nth_input",
"get_number_of_non_param_args",
"get_target_type_str",
"getattr_from_fqn",
"map_arg",
"op_type_supports_shadowing",
"return_first_non_observer_node"
],
"torch.ao.ns.fx.mappings": [
"Callable",
"Dict",
"List",
"NSNodeTargetType",
"Optional",
"Set",
"Tuple",
"get_native_backend_config"
],
"torch.ao.ns.fx.n_shadows_utils": [
"Any",
"Callable",
"Dict",
"Graph",
"GraphModule",
"List",
"NSResultsType",
"NSSingleResultValuesType",
"Node",
"Optional",
"QConfigAny",
"QConfigMapping",
"Set",
"Tuple",
"get_normalized_nth_input",
"get_target_type_str",
"getattr_from_fqn",
"tree_map"
],
"torch.ao.ns.fx.ns_types": [
"Any",
"Callable",
"Dict",
"List",
"NSNodeTargetType",
"NSResultsType",
"NSSingleResultType",
"NamedTuple",
"Node",
"Union"
],
"torch.ao.ns.fx.pattern_utils": [
"Any",
"Callable",
"Dict",
"FakeQuantizeBase",
"GraphModule",
"List",
"NSFusionElType",
"NSFusionType",
"NSNodeTargetType",
"Node",
"ObserverBase",
"Set",
"Tuple",
"Union",
"get_native_backend_config",
"getattr_from_fqn"
],
"torch.ao.ns.fx.utils": [
"Callable",
"Dict",
"FakeQuantizeBase",
"GraphModule",
"List",
"NSNodeTargetType",
"NSResultsType",
"Node",
"ObserverBase",
"Optional",
"Set",
"Tuple",
"Union",
"getattr_from_fqn"
],
"torch.ao.ns.fx.weight_utils": [
"Callable",
"Dict",
"GraphModule",
"List",
"NSSingleResultType",
"NSSingleResultValuesType",
"Node",
"Optional",
"get_target_type_str",
"getattr_from_fqn",
"return_first_non_observer_node"
],
"torch.ao.pruning": [
"get_dynamic_sparse_quantized_mapping",
"get_static_sparse_quantized_mapping"
],
"torch.ao.quantization.fx.lstm_utils": [
"Any",
"BackendConfig",
"Callable",
"FakeQuantizeBase",
"Optional",
"QConfig",
"QConfigMapping",
"Tuple",
"convert_to_reference_fx",
"default_weight_fake_quant",
"default_weight_observer",
"prepare_fx"
],
"torch.ao.quantization.fx.tracer": [
"ScopeContextManager"
],
"torch.ao.quantization.pt2e.prepare": [
"Any",
"Argument",
"Dict",
"EdgeOrNode",
"FakeTensor",
"GraphModule",
"Node",
"ObserverOrFakeQuantize",
"PrepareCustomConfig",
"QConfigAny",
"QConfigMapping",
"QuantizationAnnotation",
"SharedQuantizationSpec",
"Tuple",
"Union"
],
"torch.ao.quantization.pt2e.qat_utils": [
"Any",
"Callable",
"DerivedQuantizationSpec",
"Dict",
"EdgeOrNode",
"Graph",
"GraphModule",
"List",
"Node",
"QuantizationSpecBase",
"SharedQuantizationSpec",
"Tuple",
"fold_bn_weights_into_conv_node",
"replace_pattern_with_filters"
],
"torch.ao.quantization.quantize_fx": [
"Any",
"BackendConfig",
"ConvertCustomConfig",
"Dict",
"FuseCustomConfig",
"GraphModule",
"ObservedGraphModule",
"Optional",
"PrepareCustomConfig",
"QConfigMapping",
"QuantizationTracer",
"Scope",
"ScopeContextManager",
"Tuple",
"Union",
"convert",
"fuse",
"get_custom_module_class_keys",
"get_skipped_module_name_and_classes",
"get_tensorrt_backend_config",
"prepare"
],
"torch.ao.quantization.quantizer.utils": [
"List",
"Node",
"QuantizationAnnotation"
],
"torch.ao.quantization.quantizer.xnnpack_quantizer": [
"OperatorConfig",
"OperatorPatternType",
"QuantizationConfig",
"propagate_annotation"
],
"torch.ao.quantization.quantizer.xnnpack_quantizer_utils": [
"register_annotator"
],
"torch.backends.xeon.run_cpu": [
"ArgumentParser",
"Dict",
"List",
"RawTextHelpFormatter",
"Std",
"expanduser",
"start_processes"
],
"torch.distributed.algorithms.ddp_comm_hooks.mixed_precision_hooks": [
"Any",
"Variable",
"dataclass",
"no_type_check"
],
"torch.distributed.algorithms.model_averaging.hierarchical_model_averager": [
"Dict",
"Iterable",
"OrderedDict",
"Union"
],
"torch.distributed.argparse_util": [
"Action"
],
"torch.distributed.collective_utils": [
"Any",
"Callable",
"Generic",
"List",
"Optional",
"Tuple",
"TypeVar",
"Union",
"cast",
"dataclass"
],
"torch.distributed.elastic.rendezvous.c10d_rendezvous_backend": [
"Any",
"FileStore",
"NodeState",
"Optional",
"RendezvousBackend",
"RendezvousConnectionError",
"RendezvousError",
"RendezvousParameters",
"RendezvousStateError",
"Store",
"TCPStore",
"Token",
"Tuple",
"b64decode",
"b64encode",
"cast",
"construct_and_record_rdzv_event",
"parse_rendezvous_endpoint",
"timedelta"
],
"torch.distributed.elastic.rendezvous.etcd_rendezvous": [
"EtcdStore",
"Optional",
"RendezvousClosedError",
"RendezvousError",
"RendezvousHandler",
"RendezvousParameters",
"RendezvousTimeoutError",
"cas_delay",
"parse_rendezvous_endpoint"
],
"torch.distributed.elastic.rendezvous.etcd_rendezvous_backend": [
"EtcdAlreadyExist",
"EtcdClient",
"EtcdCompareFailed",
"EtcdException",
"EtcdKeyNotFound",
"EtcdResult",
"EtcdStore",
"Optional",
"RendezvousBackend",
"RendezvousConnectionError",
"RendezvousParameters",
"RendezvousStateError",
"Store",
"Token",
"Tuple",
"b64decode",
"b64encode",
"cast",
"parse_rendezvous_endpoint"
],
"torch.distributed.elastic.rendezvous.etcd_server": [
"Optional",
"TextIO",
"Union"
],
"torch.distributed.elastic.rendezvous.etcd_store": [
"Optional",
"Store",
"b64decode",
"b64encode"
],
"torch.distributed.elastic.rendezvous.static_tcp_rendezvous": [
"Optional",
"PrefixStore",
"RendezvousHandler",
"RendezvousParameters",
"Store",
"TCPStore",
"Tuple",
"cast",
"parse_rendezvous_endpoint"
],
"torch.distributed.elastic.utils.distributed": [
"closing",
"get_logger"
],
"torch.distributed.fsdp.sharded_grad_scaler": [
"Any",
"Dict",
"GradScaler",
"Iterable",
"List",
"OptState",
"Optional",
"ProcessGroup",
"Sequence",
"Tuple",
"Union",
"defaultdict",
"overload"
],
"torch.distributed.launch": [
"get_args_parser",
"run"
],
"torch.distributed.rpc.rref_proxy": [
"Future",
"partial",
"rpc_async"
],
"torch.distributed.run": [
"ArgumentParser",
"Callable",
"LaunchConfig",
"List",
"Std",
"Tuple",
"Union",
"check_env",
"elastic_launch",
"env",
"get_logger",
"macros",
"record",
"DefaultLogsSpecs",
"LogsSpecs",
"Optional",
"Set",
"Type"
],
"torch.fx.annotate": [
"Proxy",
"compatibility"
],
"torch.fx.experimental.accelerator_partitioner": [
"Deque",
"Device",
"Dict",
"GraphModule",
"List",
"NamedTuple",
"Node",
"NodeLatency",
"Partition",
"PartitionMode",
"PartitionerConfig",
"Set",
"Tuple",
"deque",
"get_extra_size_of",
"get_latency_of_partitioned_graph",
"get_partition_to_latency_mapping",
"get_size_of_all_nodes",
"map_arg",
"split_module"
],
"torch.fx.experimental.graph_gradual_typechecker": [
"BatchNorm2d",
"Callable",
"Conv2d",
"Dict",
"Equality",
"Node",
"Target",
"TensorType",
"Var",
"is_consistent",
"is_more_precise",
"reduce"
],
"torch.fx.experimental.merge_matmul": [
"Dict",
"List",
"Node",
"Tuple",
"legalize_graph",
"symbolic_trace"
],
"torch.fx.experimental.meta_tracer": [
"Any",
"Callable",
"Dict",
"Optional",
"Union"
],
"torch.fx.experimental.migrate_gradual_types.constraint": [
"TensorType"
],
"torch.fx.experimental.migrate_gradual_types.constraint_generator": [
"ApplyBroadcasting",
"BatchNorm2d",
"BinConstraintD",
"BinConstraintT",
"CalcConv",
"CalcMaxPool",
"CalcProduct",
"Callable",
"CanReshape",
"Conj",
"Conv2d",
"DGreatestUpperBound",
"DVar",
"Dict",
"Disj",
"F",
"GetItem",
"GetItemTensor",
"IndexSelect",
"Iterable",
"Node",
"T",
"TGreatestUpperBound",
"TVar",
"Target",
"TensorType",
"Transpose",
"gen_bvar",
"gen_dvar",
"gen_nat_constraints",
"gen_tensor_dims",
"gen_tvar"
],
"torch.fx.experimental.migrate_gradual_types.constraint_transformation": [
"ApplyBroadcasting",
"BinConstraintD",
"BinConstraintT",
"CalcConv",
"CalcMaxPool",
"CalcProduct",
"Callable",
"CanReshape",
"Conj",
"Constraint",
"DGreatestUpperBound",
"DVar",
"Dict",
"Disj",
"F",
"GetItem",
"GetItemTensor",
"IndexSelect",
"List",
"Prod",
"T",
"TGreatestUpperBound",
"TVar",
"TensorType",
"Transpose",
"gen_dvar",
"gen_nat_constraints",
"gen_tensor_dims"
],
"torch.fx.experimental.migrate_gradual_types.transform_to_z3": [
"BVar",
"BinConstraintD",
"BinConstraintT",
"Conj",
"ConstraintGenerator",
"D",
"DVar",
"Disj",
"F",
"Prod",
"T",
"TVar",
"TensorType",
"is_algebraic_expression",
"is_bool_expr",
"is_dim",
"transform_constraint"
],
"torch.fx.experimental.migrate_gradual_types.util": [
"BVar",
"BinConstraintD",
"DVar",
"TVar"
],
"torch.fx.experimental.normalize": [
"AnnotateTypesWithSchema",
"Any",
"Argument",
"Callable",
"Dict",
"Node",
"Optional",
"Proxy",
"Target",
"Transformer",
"Tuple",
"create_type_hint",
"map_aggregate",
"normalize_function",
"normalize_module"
],
"torch.fx.experimental.optimization": [
"Any",
"Argument",
"Dict",
"Enum",
"Iterable",
"List",
"Optional",
"ShapeProp",
"Target",
"Tuple",
"Type",
"cast",
"defaultdict",
"fuse_conv_bn_eval"
],
"torch.fx.experimental.partitioner_utils": [
"Dict",
"Enum",
"List",
"NamedTuple",
"Node",
"Set",
"map_arg"
],
"torch.fx.experimental.proxy_tensor": [
"PreDispatchTorchFunctionMode",
"ProxySymDispatchMode",
"ProxyTorchDispatchMode",
"decompose",
"disable_autocast_cache",
"disable_proxy_modes_tracing",
"extract_val",
"fake_signature",
"fetch_sym_proxy",
"fetch_object_proxy",
"get_isolated_graphmodule",
"get_proxy_slot",
"get_torch_dispatch_modes",
"has_proxy_slot",
"is_sym_node",
"maybe_handle_decomp",
"proxy_call",
"set_meta",
"set_original_aten_op",
"set_proxy_slot",
"snapshot_fake",
"thunkify",
"track_tensor",
"track_tensor_tree",
"wrap_key",
"wrapper_and_args_for_make_fx",
"TorchFunctionMetadataMode"
],
"torch.fx.experimental.rewriter": [
"Any",
"Callable",
"Dict",
"FunctionType",
"Graph",
"Optional",
"Tracer",
"Union",
"cast",
"normalize_source_lines"
],
"torch.fx.experimental.schema_type_annotation": [
"Any",
"Argument",
"Dict",
"Optional",
"Target",
"Transformer",
"Tuple"
],
"torch.fx.experimental.sym_dispatch_mode": [
"sym_function_mode",
"set_sym_function_mode"
],
"torch.fx.experimental.sym_node": [
"SymNode",
"method_to_operator",
"magic_methods",
"to_node",
"wrap_node",
"is_channels_last_contiguous_2d",
"is_channels_last_contiguous_3d",
"is_channels_last_strides_2d",
"is_channels_last_strides_3d",
"is_non_overlapping_and_dense_indicator",
"sympy_is_channels_last_contiguous_2d",
"sympy_is_channels_last_contiguous_3d",
"sympy_is_channels_last_strides_2d",
"sympy_is_channels_last_strides_3d",
"sympy_is_channels_last_strides_generic",
"is_contiguous",
"sympy_is_contiguous",
"sympy_is_contiguous_generic",
"sym_sqrt"
],
"torch.fx.experimental.symbolic_shapes": [
"Constraint",
"ConstraintViolationError",
"DimConstraints",
"DimDynamic",
"DynamicDimConstraintPrinter",
"EqualityConstraint",
"GuardOnDataDependentSymNode",
"LoggingShapeGuardPrinter",
"SymExprPrinter",
"RelaxedUnspecConstraint",
"RuntimeAssert",
"ShapeGuardPrinter",
"ShapeGuardPythonPrinter",
"StrictMinMaxConstraint",
"bind_symbols",
"cast_symbool_to_symint_guardless",
"constrain_range",
"constrain_unify",
"definitely_false",
"definitely_true",
"error",
"eval_guards",
"eval_is_non_overlapping_and_dense",
"expect_true",
"find_symbol_binding_fx_nodes",
"free_unbacked_symbols",
"fx_placeholder_targets",
"fx_placeholder_vals",
"guard_bool",
"has_hint",
"is_symbolic",
"parallel_and",
"parallel_or",
"safe_expand",
"uninteresting_files",
"CallMethodKey",
"DivideByKey",
"InnerTensorKey",
"PropagateUnbackedSymInts",
"ShapeEnvSettings",
"log_lru_cache_stats",
"PendingUnbackedSymbolNotFound",
"lru_cache"
],
"torch.fx.experimental.unification.match": [
"first",
"freeze",
"groupby",
"isvar",
"reify",
"unify"
],
"torch.fx.experimental.unify_refinements": [
"Refine",
"TensorType",
"Var",
"unify"
],
"torch.fx.experimental.validator": [
"bisect"
],
"torch.fx.passes.backends.cudagraphs": [
"CapabilityBasedPartitioner",
"FakeTensorProp",
"OperatorSupport",
"tree_map"
],
"torch.fx.passes.dialect.common.cse_pass": [
"Any",
"Dict",
"Graph",
"GraphModule",
"Node",
"PassBase",
"PassResult",
"Tuple",
"tree_flatten"
],
"torch.fx.passes.infra.partitioner": [
"Deque",
"Dict",
"GraphModule",
"Iterable",
"List",
"Node",
"OperatorSupportBase",
"Optional",
"Sequence",
"Set",
"copy",
"deque",
"fuse_by_partitions"
],
"torch.fx.passes.tests.test_pass_manager": [
"PassManager",
"inplace_wrapper",
"these_before_those_pass_constraint",
"this_before_that_pass_constraint"
],
"torch.fx.passes.utils.fuser_utils": [
"Dict",
"Graph",
"GraphModule",
"List",
"Node",
"NodeList",
"NodeSet",
"SimpleQueue",
"Tuple",
"compatibility",
"legalize_graph",
"lift_subgraph_as_module"
],
"torch.fx.tensor_type": [
"Var",
"compatibility"
],
"torch.jit.generate_bytecode": [
"List"
],
"torch.jit.mobile": [
"validate_map_location"
],
"torch.jit.quantized": [
"List",
"Optional",
"PackedSequence",
"Tensor",
"Tuple"
],
"torch.jit.unsupported_tensor_ops": [
"Any",
"Dict",
"dedent"
],
"torch.monitor": [
"Aggregation",
"Event",
"EventHandlerHandle",
"Stat",
"data_value_t",
"log_event",
"register_event_handler",
"unregister_event_handler"
],
"torch.multiprocessing.pool": [
"SimpleQueue"
],
"torch.multiprocessing.queue": [
"ForkingPickler"
],
"torch.nn.quantized.dynamic.modules.conv": [
"Conv1d",
"Conv2d",
"Conv3d",
"ConvTranspose1d",
"ConvTranspose2d",
"ConvTranspose3d"
],
"torch.nn.quantized.dynamic.modules.linear": [
"Linear"
],
"torch.nn.quantized.modules.activation": [
"ELU",
"Hardswish",
"LeakyReLU",
"MultiheadAttention",
"PReLU",
"ReLU6",
"Sigmoid",
"Softmax"
],
"torch.nn.quantized.modules.batchnorm": [
"BatchNorm2d",
"BatchNorm3d"
],
"torch.nn.quantized.modules.conv": [
"Conv1d",
"Conv2d",
"Conv3d",
"ConvTranspose1d",
"ConvTranspose2d",
"ConvTranspose3d"
],
"torch.nn.quantized.modules.dropout": [
"Dropout"
],
"torch.nn.quantized.modules.embedding_ops": [
"Embedding",
"EmbeddingBag",
"EmbeddingPackedParams"
],
"torch.nn.quantized.modules.functional_modules": [
"FXFloatFunctional",
"FloatFunctional",
"QFunctional"
],
"torch.nn.quantized.modules.linear": [
"Linear",
"LinearPackedParams"
],
"torch.nn.quantized.modules.normalization": [
"GroupNorm",
"InstanceNorm1d",
"InstanceNorm2d",
"InstanceNorm3d",
"LayerNorm"
],
"torch.nn.quantized.modules.rnn": [
"LSTM"
],
"torch.nn.quantized.modules.utils": [
"WeightedQuantizedModule"
],
"torch.nn.utils.prune": [
"ABC",
"Iterable",
"Tuple",
"abstractmethod"
],
"torch.onnx.verification": [
"Any",
"Callable",
"Collection",
"Dict",
"FrozenSet",
"List",
"Mapping",
"Number",
"Optional",
"Sequence",
"Set",
"Tuple",
"Union"
],
"torch.quantization.fx": [
"convert",
"fuse",
"prepare"
],
"torch.quantization.fx.convert": [
"convert"
],
"torch.quantization.fx.fuse": [
"fuse"
],
"torch.quantization.fx.fusion_patterns": [
"DefaultFuseHandler",
"FuseHandler"
],
"torch.quantization.fx.graph_module": [
"FusedGraphModule",
"GraphModule",
"ObservedGraphModule",
"ObservedStandaloneGraphModule",
"QuantizedGraphModule"
],
"torch.quantization.fx.match_utils": [
"MatchAllNode"
],
"torch.quantization.fx.pattern_utils": [
"QuantizeHandler",
"get_default_fusion_patterns",
"get_default_output_activation_post_process_map",
"get_default_quant_patterns"
],
"torch.quantization.fx.prepare": [
"prepare"
],
"torch.quantization.fx.quantization_patterns": [
"BatchNormQuantizeHandler",
"BinaryOpQuantizeHandler",
"CatQuantizeHandler",
"ConvReluQuantizeHandler",
"CopyNodeQuantizeHandler",
"CustomModuleQuantizeHandler",
"DefaultNodeQuantizeHandler",
"EmbeddingQuantizeHandler",
"FixedQParamsOpQuantizeHandler",
"GeneralTensorShapeOpQuantizeHandler",
"LinearReLUQuantizeHandler",
"QuantizeHandler",
"RNNDynamicQuantizeHandler",
"StandaloneModuleQuantizeHandler"
],
"torch.quantization.fx.quantization_types": [
"Pattern",
"QuantizerCls"
],
"torch.quantization.fx.utils": [
"all_node_args_have_no_tensors",
"assert_and_get_unique_device",
"create_getattr_from_value",
"get_custom_module_class_keys",
"get_linear_prepack_op_for_dtype",
"get_new_attr_name_with_prefix",
"get_non_observable_arg_indexes_and_types",
"get_qconv_prepack_op",
"graph_module_from_producer_nodes",
"maybe_get_next_module"
],
"torch.quantization.quantize_fx": [
"ObservedGraphModule",
"QuantizationTracer",
"Scope",
"ScopeContextManager",
"convert_fx",
"fuse_fx",
"prepare_fx",
"prepare_qat_fx"
],
"torch.quantization.utils": [
"activation_dtype",
"activation_is_int8_quantized",
"activation_is_statically_quantized",
"calculate_qmin_qmax",
"check_min_max_valid",
"get_combined_dict",
"get_qconfig_dtypes",
"get_qparam_dict",
"get_quant_type",
"get_swapped_custom_module_class",
"getattr_from_fqn",
"is_per_channel",
"is_per_tensor",
"weight_dtype",
"weight_is_quantized",
"weight_is_statically_quantized"
],
"torch.utils.benchmark": [
"Number",
"Optional",
"Tuple",
"Union",
"timer"
],
"torch.utils.benchmark.examples.op_benchmark": [
"BinaryOpFuzzer",
"Timer",
"UnaryOpFuzzer"
],
"torch.utils.benchmark.examples.spectral_ops_fuzz_test": [
"ArgumentParser",
"Iterable",
"SpectralOpFuzzer",
"namedtuple"
],
"torch.utils.benchmark.op_fuzzers.binary": [
"FuzzedParameter",
"FuzzedTensor",
"Fuzzer",
"ParameterAlias"
],
"torch.utils.benchmark.op_fuzzers.sparse_binary": [
"FuzzedParameter",
"FuzzedSparseTensor",
"Fuzzer",
"ParameterAlias"
],
"torch.utils.benchmark.op_fuzzers.sparse_unary": [
"FuzzedParameter",
"FuzzedSparseTensor",
"Fuzzer",
"ParameterAlias"
],
"torch.utils.benchmark.op_fuzzers.spectral": [
"power_range"
],
"torch.utils.benchmark.op_fuzzers.unary": [
"FuzzedParameter",
"FuzzedTensor",
"Fuzzer",
"ParameterAlias"
],
"torch.utils.benchmark.utils.compile": [
"bench_loop"
],
"torch.utils.bundled_inputs": [
"Any",
"Callable",
"Dict",
"List",
"ListType",
"NamedTuple",
"Optional",
"Sequence",
"Tuple",
"TupleType",
"TypeVar",
"Union",
"wrap_cpp_module"
],
"torch.utils.collect_env": [
"namedtuple"
],
"torch.utils.data.datapipes.gen_pyi": [
"Any",
"Dict",
"List",
"Set",
"Tuple",
"Union",
"defaultdict",
"Path"
],
"torch.utils.data.datapipes.utils.snapshot": [
"IterDataPipe",
"apply_random_seed"
],
"torch.utils.flop_counter": [
"addmm_flop",
"baddbmm_flop",
"bmm_flop",
"conv_backward_flop",
"conv_flop",
"conv_flop_count",
"convert_num_with_suffix",
"convert_to_percent_str",
"get_shape",
"get_suffix_str",
"mm_flop",
"normalize_tuple",
"sdpa_backward_flop",
"sdpa_backward_flop_count",
"sdpa_flop",
"sdpa_flop_count",
"shape_wrapper",
"transpose_shape"
],
"torch.utils.jit.log_extract": [
"Any",
"List",
"Timer",
"Tuple",
"cast",
"contextmanager"
],
"torch.utils.mobile_optimizer": [
"Enum",
"List",
"MobileOptimizerType",
"Optional",
"Set"
],
"torch.utils.model_dump": [
"main"
],
"torch.utils.model_zoo": [
"load_url",
"tqdm"
],
"torch.utils.tensorboard": [
"RecordWriter"
],
"torch.ao.quantization.experimental.APoT_tensor": [
"APoTQuantizer"
],
"torch.ao.quantization.experimental.fake_quantize": [
"APoTObserver",
"FakeQuantizeBase",
"Tensor"
],
"torch.ao.quantization.experimental.fake_quantize_function": [
"dequantize_APoT",
"quantize_APoT",
"Tensor"
],
"torch.ao.quantization.experimental.linear": [
"APoTObserver",
"quantize_APoT",
"WeightedQuantizedModule"
],
"torch.ao.quantization.experimental.observer": [
"apot_to_float",
"float_to_apot",
"ObserverBase"
],
"torch.ao.quantization.experimental.qconfig": [
"APoTFakeQuantize",
"default_symmetric_fake_quant",
"default_weight_symmetric_fake_quant",
"FakeQuantize",
"MinMaxObserver",
"QConfig"
],
"torch.ao.quantization.experimental.quantizer": [
"apot_to_float",
"float_to_apot",
"quant_dequant_util",
"Tensor"
],
"torch.ao.sparsity": [
"BaseScheduler",
"BaseSparsifier",
"CubicSL",
"FakeSparsity",
"fqn_to_module",
"get_arg_info_from_tensor_fqn",
"get_dynamic_sparse_quantized_mapping",
"get_static_sparse_quantized_mapping",
"LambdaSL",
"module_to_fqn",
"NearlyDiagonalSparsifier",
"WeightNormSparsifier"
],
"torch.ao.sparsity.scheduler.base_scheduler": [
"BaseScheduler"
],
"torch.ao.sparsity.scheduler.cubic_scheduler": [
"CubicSL"
],
"torch.ao.sparsity.scheduler.lambda_scheduler": [
"LambdaSL"
],
"torch.ao.sparsity.sparsifier.base_sparsifier": [
"BaseSparsifier"
],
"torch.ao.sparsity.sparsifier.nearly_diagonal_sparsifier": [
"NearlyDiagonalSparsifier"
],
"torch.ao.sparsity.sparsifier.utils": [
"FakeSparsity",
"fqn_to_module",
"get_arg_info_from_tensor_fqn",
"module_to_fqn"
],
"torch.ao.sparsity.sparsifier.weight_norm_sparsifier": [
"WeightNormSparsifier"
],
"torch.csrc.jit.tensorexpr.codegen_external": [
"FileManager",
"parse_native_yaml"
],
"torch.distributed.checkpoint.examples.async_checkpointing_example": [
"FSDP",
"init_device_mesh"
],
"torch.distributed.checkpoint.examples.fsdp_checkpoint_example": [
"FSDP",
"load_sharded_optimizer_state_dict",
"StateDictType"
],
"torch.distributed.checkpoint.examples.stateful_example": [
"FSDP",
"init_device_mesh"
],
"torch.distributed.elastic.events.fb.scuba": [
"await_sync",
"cast",
"Dict",
"Enum",
"Event",
"EventMetadataValue",
"List",
"Optional",
"RdzvEvent",
"RuntimeEnvironment",
"TorchelasticRdzvLogEntry",
"TorchelasticStatusLogEntry",
"WhenceScribeLogged"
],
"torch.distributed.elastic.metrics.fb.service_data_metrics": [
"MetricHandler",
"ServiceDataMetrics"
],
"torch.distributed.elastic.metrics.static_init": [
"configure",
"get_logger",
"MetricsConfig",
"Optional",
"ServiceDataMetricsHandler",
"TorchElasticService"
],
"torch.distributed.elastic.multiprocessing.errors.fb.error_handler_fb": [
"Any",
"Dict",
"ErrorHandler",
"format_exception",
"generate_python_trace",
"MastReplyFileErrorCode",
"Optional",
"RuntimeEnvironment",
"RuntimeEnvironmentScheduler",
"write_formatted_message"
],
"torch.distributed.elastic.multiprocessing.errors.handlers": [
"ErrorHandlerFB"
],
"torch.distributed.elastic.rendezvous.fb.mast_rendezvous": [
"create_c10d_store",
"DistNetworkError",
"DistStoreError",
"get_logger",
"List",
"Optional",
"RendezvousClosedError",
"RendezvousHandler",
"RendezvousParameters",
"RendezvousTimeoutError",
"Tuple"
],
"torch.distributed.elastic.rendezvous.fb.zeus": [
"gethostname",
"get_logger",
"namedtuple",
"Optional",
"RendezvousClosedError",
"RendezvousHandler",
"RendezvousParameters",
"RendezvousTimeoutError"
],
"torch.distributed.elastic.rendezvous.registry": [
"create_handler",
"RendezvousHandler",
"RendezvousParameters"
],
"torch.distributed.logging_handlers": [
"C10D_CATEGORY",
"Dict",
"LogCategory",
"Optional",
"Sample",
"ScubaData",
"signpost",
"SignpostType"
],
"torch.utils.benchmark.examples.sparse.op_benchmark": [
"BinaryOpSparseFuzzer",
"Timer",
"UnaryOpSparseFuzzer"
],
"torch.version": [
"get_file_path"
],
"torch.ao.nn.intrinsic.modules": [
"_FusedModule"
],
"torch.distributed.benchmarks.benchmark_ddp_rpc": [
"BackendType",
"DDP",
"DistributedOptimizer",
"RRef",
"TensorPipeRpcBackendOptions"
],
"torch.distributed.pipelining": [
"Pipe",
"PipelineStage",
"SplitPoint",
"pipe_split",
"pipeline"
],
"torch.distributed.pipelining.microbatch": [
"Any",
"Dict",
"List",
"Optional",
"Tuple",
"tree_flatten",
"tree_unflatten"
],
"torch.export": [
"Constraint",
"ShapesCollection"
],
"torch.export.dynamic_shapes": [
"Constraint",
"ShapesCollection"
],
"torch.export.graph_signature": [
"TokenArgument"
],
"torch.fx.experimental.shape_inference.infer_shape": [
"DimDynamic",
"FakeTensorMode",
"LocalSource",
"ShapeEnv",
"defaultdict",
"infer_symbol_values",
"make_fx"
],
"torch.fx.experimental.shape_inference.infer_symbol_values": [
"Any",
"DefaultDict",
"Dict",
"List",
"Tuple",
"Union"
],
"torch.fx.passes.runtime_assert": [
"Any",
"Dict",
"GraphModule",
"Optional",
"Set",
"ShapeEnv",
"SymNode",
"compatibility",
"lazy_format_graph_code"
],
"torch.library": [
"opcheck",
"register_autograd",
"register_kernel"
],
"torch.mtia": [
"DeferredMtiaCallError",
"StreamContext"
],
"torch.onnx.symbolic_helper": [
"Any",
"Callable",
"List",
"Literal",
"NoReturn",
"Number",
"Optional",
"Sequence",
"Set",
"Tuple",
"Union"
],
"torch.onnx.symbolic_opset18": [
"amax",
"amin",
"aminmax",
"embedding_bag",
"linalg_vector_norm",
"max",
"maximum",
"min",
"minimum"
],
"torch.onnx.symbolic_opset20": [
"_affine_grid_generator",
"_grid_sampler",
"convert_grid_sample_mode"
],
"torch.utils.data.datapipes.dataframe.dataframe_wrapper": [
"Any",
"Optional"
],
"torch.utils.hipify.hipify_python": [
"TrieNode"
]
}