Revert "[5/N][Easy] fix typo for usort config in pyproject.toml (kown -> known): sort torch (#127126)"

This reverts commit 7763c83af67eebfdd5185dbe6ce15ece2b992a0f.

Reverted https://github.com/pytorch/pytorch/pull/127126 on behalf of https://github.com/XuehaiPan due to Broken CI ([comment](https://github.com/pytorch/pytorch/pull/127126#issuecomment-2133044286))
Author: PyTorch MergeBot
Date:   2024-05-27 09:22:08 +00:00
Parent: 4608971f7a
Commit: 55c0ab2887

295 changed files with 468 additions and 573 deletions
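For context: the reverted PR's functional change was a one-word spelling fix in the usort configuration inside pyproject.toml, which made usort start treating torch as a known first-party module and re-sort imports accordingly across the tree; this revert restores the previous ordering. A minimal sketch of that kind of fix, assuming the misspelling was in the `[tool.usort.known]` table name and using an illustrative module list rather than PyTorch's actual one:

```
-[tool.usort.kown]
+[tool.usort.known]
 first_party = ["torch"]  # illustrative list, not the real PyTorch config
```

With the table name misspelled, usort presumably never sees the `first_party` list, so torch falls back to being sorted as an ordinary third-party import, which is the ordering every hunk below restores.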

@@ -1,6 +1,5 @@
from torchvision import models
import torch
from torchvision import models
print(torch.version.__version__)
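Note that the extraction of this page dropped the leading +/- markers, so each hunk reads as a flat list in which a moved line simply appears twice. Reconstructing the first hunk above with its markers (an inference from the line counts and the sorting behavior described earlier; blank-line placement is approximate) shows the pattern that repeats in every file below: the separate first-party torch block is dissolved and torch is re-inserted alphabetically among the third-party imports.

```
@@ -1,6 +1,5 @@
-from torchvision import models
-
 import torch
+from torchvision import models
 
 print(torch.version.__version__)
```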

@@ -4,11 +4,10 @@ MobileNetV2 TorchScript model, and dumps root ops used by the model for custom
build script to create a tailored build which only contains these used ops.
"""
import torch
import yaml
from torchvision import models
import torch
# Download and trace the model.
model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
model.eval()

@@ -18,12 +18,11 @@ import sys
import time
import numpy as np
import torchvision
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
def allgather_object(obj):

@@ -3,10 +3,10 @@ import math
import os
import time
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
import torch.nn as nn
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
from torch.distributed import rpc
from torch.distributed.pipeline.sync import Pipe

@@ -3,10 +3,18 @@ import json
import os
from pathlib import Path
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from data import data_map
from metrics.ProcessedMetricsPrinter import ProcessedMetricsPrinter
from models import model_map
from server import server_map
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
from trainer import (
criterion_map,
ddp_hook_map,
@@ -17,14 +25,6 @@ from trainer import (
trainer_map,
)
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
def get_name(rank, args):
r"""

@@ -3,12 +3,12 @@ import threading
import time
from abc import ABC, abstractmethod
from metrics.MetricsLogger import MetricsLogger
from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
import torch
import torch.distributed.rpc as rpc
from metrics.MetricsLogger import MetricsLogger
from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
class ParameterServerBase(ABC):
PARAMETER_SERVER_BATCH_METRIC = "parameter_server_batch_metric"

@@ -1,7 +1,6 @@
from utils import process_bucket_with_remote_server
import torch
import torch.distributed as c10d
from utils import process_bucket_with_remote_server
def allreduce_hook(state, bucket):

@@ -2,10 +2,10 @@ import functools
import time
from abc import ABC, abstractmethod
from metrics.MetricsLogger import MetricsLogger
import torch
from metrics.MetricsLogger import MetricsLogger
class TrainerBase(ABC):
BATCH_LEVEL_METRIC = "batch_level_metric"

@@ -2,12 +2,12 @@ import time
import numpy as np
from agent import AgentBase
from observer import ObserverBase
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from observer import ObserverBase
COORDINATOR_NAME = "coordinator"
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"

@@ -4,11 +4,11 @@ import json
import os
import time
from coordinator import CoordinatorBase
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from coordinator import CoordinatorBase
COORDINATOR_NAME = "coordinator"
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"

@@ -1,10 +1,10 @@
import random
import time
from agent import AgentBase
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from torch.distributed.rpc import rpc_sync

@@ -36,21 +36,20 @@ from typing import (
Type,
TYPE_CHECKING,
)
from typing_extensions import Self
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
import psutil
from scipy.stats import gmean, ttest_ind
from tqdm.auto import tqdm, trange
import torch
import torch._dynamo
import torch._dynamo.utils
import torch._export
import torch.distributed
import torch.multiprocessing as mp
from scipy.stats import gmean, ttest_ind
from torch._C import _has_cuda as HAS_CUDA, _has_xpu as HAS_XPU
from torch._dynamo.profiler import fx_insert_profiling, Profiler
from torch._dynamo.testing import (
@@ -60,6 +59,8 @@ from torch._dynamo.testing import (
same,
)
from tqdm.auto import tqdm, trange
try:
from torch._dynamo.utils import (
clone_inputs,
@@ -73,14 +74,15 @@ except ImportError:
graph_break_reasons,
maybe_enable_compiled_autograd,
)
import torch._functorch.config
from torch._functorch.aot_autograd import set_model_name
from torch._inductor import config as inductor_config, metrics
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_map, tree_map_only
try:
import torch_xla
import torch_xla.core.xla_model as xm
@@ -2341,16 +2343,16 @@ class BenchmarkRunner:
def get_fsdp_auto_wrap_policy(self, model_name: str):
from diffusers.models.transformer_2d import Transformer2DModel
from torchbenchmark.models.nanogpt.model import Block
from transformers.models.llama.modeling_llama import LlamaDecoderLayer
from transformers.models.t5.modeling_t5 import T5Block
from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer
from torch.distributed.fsdp.wrap import (
ModuleWrapPolicy,
size_based_auto_wrap_policy,
)
from torchbenchmark.models.nanogpt.model import Block
from transformers.models.llama.modeling_llama import LlamaDecoderLayer
from transformers.models.t5.modeling_t5 import T5Block
from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer
# handcrafted wrap policy
MODEL_FSDP_WRAP = {

@@ -7,9 +7,8 @@ import subprocess
import sys
import warnings
from common import BenchmarkRunner, download_retry_decorator, main, reset_rng_state
import torch
from common import BenchmarkRunner, download_retry_decorator, main, reset_rng_state
from torch._dynamo.testing import collect_results
from torch._dynamo.utils import clone_inputs

@@ -1,11 +1,10 @@
# flake8: noqa
import triton
from prettytable import PrettyTable
import torch
import torch._dynamo
import torch._inductor.config
import triton
from prettytable import PrettyTable
# torch._inductor.config.debug = True
torch._inductor.config.triton.dense_indexing = True

@@ -1,10 +1,9 @@
from benchmark_helper import time_with_torch_timer
import torch
import torch._dynamo
import torch._dynamo.config
import torch._inductor.config as config
from benchmark_helper import time_with_torch_timer
@torch._dynamo.optimize("inductor", nopython=True)

@@ -1,9 +1,8 @@
import itertools
from benchmark_helper import time_with_torch_timer
import torch
import torch._dynamo
from benchmark_helper import time_with_torch_timer
@torch._dynamo.optimize("inductor", nopython=True)

@@ -1,11 +1,10 @@
import triton
from benchmark_helper import time_with_torch_timer
import torch
import torch._dynamo
import torch._dynamo.config
import torch._inductor.config as config
import triton
from benchmark_helper import time_with_torch_timer
# The flag below controls whether to allow TF32 on matmul. This flag defaults to True.
torch.backends.cuda.matmul.allow_tf32 = True

@@ -1,9 +1,8 @@
from benchmark_helper import time_with_torch_timer
import torch
import torch._dynamo
import torch._inductor.config as inductor_config
from benchmark_helper import time_with_torch_timer
inductor_config.triton.mm = "triton"

@@ -5,7 +5,6 @@ import sys
import numpy as np
import tabulate
import torch
import torch._inductor

@@ -1,9 +1,8 @@
#!/usr/bin/env python3
import click
import numpy as np
from operator_inp_utils import OperatorInputsLoader
import torch
from operator_inp_utils import OperatorInputsLoader
from torch._dynamo.backends.cudagraphs import cudagraphs_inner
from torch._dynamo.testing import same

@@ -47,13 +47,12 @@ import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import rcParams
from scipy.stats import gmean
from tabulate import tabulate
import torch
import torch._dynamo
from matplotlib import rcParams
from scipy.stats import gmean
from tabulate import tabulate
rcParams.update({"figure.autolayout": True})
plt.rc("axes", axisbelow=True)

@@ -7,9 +7,8 @@ import subprocess
import sys
import warnings
from common import BenchmarkRunner, download_retry_decorator, main
import torch
from common import BenchmarkRunner, download_retry_decorator, main
from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
from torch._dynamo.utils import clone_inputs

@@ -10,9 +10,8 @@ import warnings
from collections import namedtuple
from os.path import abspath, exists
import yaml
import torch
import yaml
try:
from .common import BenchmarkRunner, main

@@ -5,13 +5,12 @@ import sys
import time
from datetime import timedelta
from datasets import load_dataset, load_metric
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import torch._dynamo
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
torch.backends.cuda.matmul.allow_tf32 = True

@@ -1,9 +1,8 @@
from collections import namedtuple
from functools import partial
import torchvision.models as cnn
import torch
import torchvision.models as cnn
from .factory import (
dropoutlstm_creator,

@@ -1,5 +1,4 @@
import pytest
import torch
from .fuser import set_fuser

@@ -1,6 +1,5 @@
from utils import NUM_LOOP_ITERS
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):

@@ -1,10 +1,10 @@
import torch
import torchaudio_models as models
from torch import nn, Tensor
from utils import check_for_functorch, extract_weights, GetterReturnType, load_weights
import torch
from torch import nn, Tensor
has_functorch = check_for_functorch()

@@ -1,9 +1,9 @@
from utils import GetterReturnType
import torch
import torch.distributions as dist
from torch import Tensor
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10

@@ -1,12 +1,11 @@
from typing import cast
import torch
import torchvision_models as models
from torch import Tensor
from utils import check_for_functorch, extract_weights, GetterReturnType, load_weights
import torch
from torch import Tensor
has_functorch = check_for_functorch()

@@ -4,7 +4,6 @@ import sys
import time
import click
import torch
torch.set_num_threads(1)

@@ -6,6 +6,8 @@ import os
import time
from typing import Optional, Tuple
import torch
import torch._inductor.config
from mixtral_moe_model import Transformer as MixtralMoE
from mixtral_moe_quantize import (
WeightOnlyInt8QuantHandler as MixtralMoEWeightOnlyInt8QuantHandler,
@@ -13,9 +15,6 @@ from mixtral_moe_quantize import (
from model import Transformer as LLaMA
from quantize import WeightOnlyInt8QuantHandler as LLaMAWeightOnlyInt8QuantHandler
import torch
import torch._inductor.config
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future

@@ -1,10 +1,10 @@
# flake8: noqa: E266, C417, B950
from mixtral_moe_model import ConditionalFeedForward
import torch
import torch.nn as nn
import torch.nn.functional as F
from mixtral_moe_model import ConditionalFeedForward
##### Quantization Primitives ######

@@ -187,9 +187,8 @@ class BackendWorker:
def _setup(self):
import time
from torchvision.models.resnet import BasicBlock, ResNet
import torch
from torchvision.models.resnet import BasicBlock, ResNet
# Create ResNet18 on meta device
with torch.device("meta"):

@@ -4,11 +4,11 @@ import shutil
import textwrap
from typing import List, Optional, Tuple
from torch.utils.benchmark.utils.common import _make_temp_dir
from core.api import GroupedBenchmark, TimerArgs
from core.types import Definition, FlatIntermediateDefinition, Label
from torch.utils.benchmark.utils.common import _make_temp_dir
_TEMPDIR: Optional[str] = None

@@ -7,7 +7,6 @@ from collections import namedtuple
import benchmark_utils
import numpy as np
import torch
# needs to be imported after torch

@@ -2,7 +2,6 @@ import json
import time
import benchmark_cpp_extension # noqa: F401
import torch

@@ -1,7 +1,6 @@
import time
import numpy as np
import torch
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""

@@ -1,5 +1,4 @@
import operator_benchmark as op_bench
import torch

@@ -1,5 +1,4 @@
import operator_benchmark as op_bench
import torch
intraop_bench_configs = op_bench.config_list(

@@ -1,5 +1,4 @@
import operator_benchmark as op_bench
import torch

@@ -1,5 +1,4 @@
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""

@@ -1,5 +1,4 @@
import operator_benchmark as op_bench
import torch

@@ -1,5 +1,4 @@
import operator_benchmark as op_bench
import torch

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT add operator

@@ -1,9 +1,10 @@
import operator_benchmark as op_bench
import torch
from torch import nn
from torch.ao import pruning
import operator_benchmark as op_bench
"""Microbenchmarks for sparsifier."""

@@ -1,9 +1,9 @@
from typing import List
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for as_strided operator"""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
import operator_benchmark as op_bench
"""Microbenchmarks for batchnorm operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for binary operators."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""

@@ -1,10 +1,10 @@
import random
from typing import List
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for Cat operator"""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for channel_shuffle operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for Chunk operator"""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for ClipRanges operator."""
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")

@@ -1,8 +1,9 @@
import torch
import torch.nn as nn
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for Conv1d and ConvTranspose1d operators.

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for diag operator"""

@@ -1,8 +1,8 @@
import numpy
import torch
from pt import configs
import operator_benchmark as op_bench
import torch
"""Embedding and EmbeddingBag Operator Benchmark"""

@@ -1,8 +1,9 @@
import operator_benchmark as op_bench
import torch
from torch.testing._internal.common_device_type import get_all_device_types
import operator_benchmark as op_bench
"""Microbenchmark for Fill_ operator."""
fill_short_configs = op_bench.config_list(

@@ -1,7 +1,7 @@
import numpy
import torch
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for gather operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""
Microbenchmarks for the gelu operators.

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
import operator_benchmark as op_bench
"""Microbenchmarks for groupnorm operator."""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn as nn
import operator_benchmark as op_bench
"""
Microbenchmarks for the hardsigmoid operator.

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn as nn
import operator_benchmark as op_bench
"""
Microbenchmarks for the hardswish operators.

@@ -1,7 +1,7 @@
import numpy
import torch
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for index_select operator."""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
import operator_benchmark as op_bench
"""Microbenchmarks for instancenorm operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for interpolate operator."""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
import operator_benchmark as op_bench
"""Microbenchmarks for layernorm operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_prepack_fp16 operator

@@ -1,8 +1,9 @@
import torch
import torch.nn as nn
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""Microbenchmarks for Linear operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_unpack_fp16 operator

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for MatMul operator"""
# Configs for PT Matmul operator

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""
Microbenchmarks for batch matrix mult with einsum and torch.bmm.
"""

@@ -1,9 +1,9 @@
import math
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for torch.nan_to_num / nan_to_num_ operators"""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn as nn
import operator_benchmark as op_bench
"""
Microbenchmarks for MaxPool1d and AvgPool1d operators.
"""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized.functional as qF
import operator_benchmark as op_bench
r"""Microbenchmarks for the quantized activations."""
qactivation_long_configs = op_bench.cross_product_configs(

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),

@@ -1,11 +1,11 @@
import numpy
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.ao.nn.qat as nnqat
from pt import configs
from torch.ao.quantization import default_embedding_qat_qconfig
import operator_benchmark as op_bench
"""
Microbenchmarks for QAT Embedding + EmbeddingBag operators.
"""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for quantized batchnorm operator."""

@@ -1,10 +1,10 @@
from typing import List
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized as nnq
import operator_benchmark as op_bench
"""Microbenchmarks for quantized Cat operator"""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
qcomparators_configs = op_bench.cross_product_configs(
N=(8, 64),
dtype=(torch.quint8, torch.qint8, torch.qint32),

@@ -1,8 +1,9 @@
import torch
import torch.ao.nn.quantized as nnq
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized as nnq
"""
Microbenchmarks for qConv operators.

@@ -1,12 +1,12 @@
from typing import Optional
import numpy as np
import operator_benchmark as op_bench
import torch
from torch.testing._internal.common_quantization import lengths_to_offsets
import operator_benchmark as op_bench
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
embeddingbag_conversion_short_configs = op_bench.cross_product_configs(
num_embeddings=(80,), embedding_dim=(128, 256, 512), tags=("short",)
)

@@ -1,9 +1,9 @@
import numpy
import torch
import torch.ao.nn.quantized as nnq
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized as nnq
"""
Microbenchmarks for qEmbeddingBag operators.

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for quantized groupnorm operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for quantized instancenorm operator."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for the quantized interpolate op.
Note: We are not benchmarking `upsample` as it is being deprecated, and calls

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for quantized layernorm operator."""

@@ -1,10 +1,11 @@
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized as nnq
import torch.ao.nn.quantized.dynamic as nnqd
from pt import configs
import operator_benchmark as op_bench
"""
Microbenchmarks for Quantized Linear operators.
"""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.ao.quantization.observer as obs
import operator_benchmark as op_bench
qobserver_short_configs_dict = {
"attr_names": ("C", "M", "N", "dtype", "device"),
"attrs": (

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
# 2D pooling will have input matrix of rank 3 or 4
qpool2d_long_configs = op_bench.config_list(
attrs=(

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
from torch import nn
import operator_benchmark as op_bench
"""
Microbenchmarks for RNNs.
"""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
# Configs for pointwise and reduction unary ops
qmethods_configs_short = op_bench.config_list(
attr_names=["M", "N"],

@@ -1,9 +1,10 @@
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized as nnq
import torch.ao.quantization as tq
import torch.nn as nn
import operator_benchmark as op_bench
"""Microbenchmarks for general quantization operations."""
# mode is used to show the direction of the benchmark:

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for quantized unary operators (point-wise and reduction)."""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for remainder operators."""

@@ -1,7 +1,8 @@
import operator_benchmark as op_bench
import torch
import torch.nn as nn
import operator_benchmark as op_bench
"""
Microbenchmarks for the softmax operators.

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for Split operator"""

@@ -1,10 +1,10 @@
import random
from typing import List
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for Stack operator"""

@@ -1,6 +1,7 @@
import operator_benchmark as op_bench
import torch
import operator_benchmark as op_bench
"""Microbenchmarks for sum reduction operator."""
# Configs for PT add operator

Some files were not shown because too many files have changed in this diff.