[5/N][Easy] fix typo for usort config in pyproject.toml (kown -> known): sort torch (#127126)

The `usort` config in `pyproject.toml` has no effect due to a typo in the section name (`kown` instead of `known`). Fixing the typo makes `usort` do more work and produces the changes in this PR: with the config active, `torch` is treated as a known first-party import and is sorted into its own block. Except for `pyproject.toml` itself, all changes were generated by `lintrunner -a --take UFMT --all-files`.
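For reference, the fix itself is a one-word change to the config section header in `pyproject.toml`. A minimal sketch of that hunk (the `first_party` list shown here is reconstructed for illustration and may not match the repository exactly):

-[tool.usort.kown]
+[tool.usort.known]
 first_party = ["caffe2", "torch", "torchgen", "functorch", "test"]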

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127126
Approved by: https://github.com/kit1980
Author: Xuehai Pan
Date: 2024-05-27 09:47:26 +00:00
Committed by: PyTorch MergeBot
Parent: c7f6fbfa9d
Commit: 26f4f10ac8
296 changed files with 574 additions and 468 deletions
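Every file hunk below follows the same mechanical pattern: with the config active, `torch` and `torch.*` imports are pulled out of the third-party block and re-grouped, typically landing after third-party and local helper imports. A representative before/after for a hypothetical benchmark file (illustrative only; `some_local_helper` is not a module in this commit):

@@ -1,6 +1,7 @@
-import torch
-import torch.nn as nn
 import numpy as np
 from some_local_helper import load_inputs
+
+import torch
+import torch.nn as nn
 def main():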

View File

@@ -1,6 +1,7 @@
-import torch
 from torchvision import models
+
+import torch
 print(torch.version.__version__)
 resnet18 = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)

View File

@@ -4,10 +4,11 @@ MobileNetV2 TorchScript model, and dumps root ops used by the model for custom
 build script to create a tailored build which only contains these used ops.
 """
-import torch
 import yaml
 from torchvision import models
+
+import torch
 # Download and trace the model.
 model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
 model.eval()

View File

@@ -18,11 +18,12 @@ import sys
 import time
 import numpy as np
+import torchvision
+
 import torch
 import torch.distributed as dist
 import torch.nn as nn
 import torch.optim as optim
-import torchvision
 def allgather_object(obj):

View File

@@ -3,10 +3,10 @@ import math
 import os
 import time
+from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
 import torch
 import torch.nn as nn
-from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
 from torch.distributed import rpc
 from torch.distributed.pipeline.sync import Pipe

View File

@@ -3,18 +3,10 @@ import json
 import os
 from pathlib import Path
-import torch
-import torch.distributed as c10d
-import torch.distributed.rpc as rpc
-import torch.multiprocessing as mp
-
 from data import data_map
 from metrics.ProcessedMetricsPrinter import ProcessedMetricsPrinter
 from models import model_map
 from server import server_map
-from torch.distributed.rpc import TensorPipeRpcBackendOptions
-from torch.futures import wait_all
-from torch.utils.data import DataLoader
 from trainer import (
     criterion_map,
     ddp_hook_map,
@@ -25,6 +17,14 @@ from trainer import (
     trainer_map,
 )
+
+import torch
+import torch.distributed as c10d
+import torch.distributed.rpc as rpc
+import torch.multiprocessing as mp
+from torch.distributed.rpc import TensorPipeRpcBackendOptions
+from torch.futures import wait_all
+from torch.utils.data import DataLoader
 def get_name(rank, args):
     r"""

View File

@@ -3,12 +3,12 @@ import threading
 import time
 from abc import ABC, abstractmethod
-import torch
-import torch.distributed.rpc as rpc
 from metrics.MetricsLogger import MetricsLogger
 from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
+import torch
+import torch.distributed.rpc as rpc
 class ParameterServerBase(ABC):
     PARAMETER_SERVER_BATCH_METRIC = "parameter_server_batch_metric"

View File

@@ -1,6 +1,7 @@
+from utils import process_bucket_with_remote_server
+
 import torch
 import torch.distributed as c10d
-from utils import process_bucket_with_remote_server
 def allreduce_hook(state, bucket):

View File

@@ -2,10 +2,10 @@ import functools
 import time
 from abc import ABC, abstractmethod
-import torch
 from metrics.MetricsLogger import MetricsLogger
+import torch
 class TrainerBase(ABC):
     BATCH_LEVEL_METRIC = "batch_level_metric"

View File

@@ -2,12 +2,12 @@ import time
 import numpy as np
-import torch
-import torch.distributed.rpc as rpc
 from agent import AgentBase
 from observer import ObserverBase
+import torch
+import torch.distributed.rpc as rpc
 COORDINATOR_NAME = "coordinator"
 AGENT_NAME = "agent"
 OBSERVER_NAME = "observer{}"

View File

@@ -4,11 +4,11 @@ import json
 import os
 import time
+from coordinator import CoordinatorBase
 import torch.distributed.rpc as rpc
 import torch.multiprocessing as mp
-from coordinator import CoordinatorBase
 COORDINATOR_NAME = "coordinator"
 AGENT_NAME = "agent"
 OBSERVER_NAME = "observer{}"

View File

@@ -1,10 +1,10 @@
 import random
 import time
+from agent import AgentBase
 import torch
 import torch.distributed.rpc as rpc
-from agent import AgentBase
 from torch.distributed.rpc import rpc_sync

View File

@@ -36,20 +36,21 @@ from typing import (
     Type,
     TYPE_CHECKING,
 )
 from typing_extensions import Self
 from unittest.mock import MagicMock
 import numpy as np
 import pandas as pd
 import psutil
+from scipy.stats import gmean, ttest_ind
+from tqdm.auto import tqdm, trange
 import torch
 import torch._dynamo
 import torch._dynamo.utils
 import torch._export
 import torch.distributed
 import torch.multiprocessing as mp
-from scipy.stats import gmean, ttest_ind
 from torch._C import _has_cuda as HAS_CUDA, _has_xpu as HAS_XPU
 from torch._dynamo.profiler import fx_insert_profiling, Profiler
 from torch._dynamo.testing import (
@@ -59,8 +60,6 @@ from torch._dynamo.testing import (
     same,
 )
-from tqdm.auto import tqdm, trange
-
 try:
     from torch._dynamo.utils import (
         clone_inputs,
@@ -74,15 +73,14 @@ except ImportError:
         graph_break_reasons,
         maybe_enable_compiled_autograd,
     )
-
 import torch._functorch.config
 from torch._functorch.aot_autograd import set_model_name
 from torch._inductor import config as inductor_config, metrics
 from torch._subclasses.fake_tensor import FakeTensorMode
 from torch.utils import _pytree as pytree
 from torch.utils._pytree import tree_map, tree_map_only
 try:
     import torch_xla
     import torch_xla.core.xla_model as xm
@@ -2343,17 +2341,17 @@
     def get_fsdp_auto_wrap_policy(self, model_name: str):
         from diffusers.models.transformer_2d import Transformer2DModel
-        from torch.distributed.fsdp.wrap import (
-            ModuleWrapPolicy,
-            size_based_auto_wrap_policy,
-        )
         from torchbenchmark.models.nanogpt.model import Block
         from transformers.models.llama.modeling_llama import LlamaDecoderLayer
         from transformers.models.t5.modeling_t5 import T5Block
         from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer
+        from torch.distributed.fsdp.wrap import (
+            ModuleWrapPolicy,
+            size_based_auto_wrap_policy,
+        )
         # handcrafted wrap policy
         MODEL_FSDP_WRAP = {
             "stable_diffusion_unet": (Transformer2DModel,),

View File

@@ -7,9 +7,10 @@ import subprocess
 import sys
 import warnings
-import torch
 from common import BenchmarkRunner, download_retry_decorator, main, reset_rng_state
+
+import torch
 from torch._dynamo.testing import collect_results
 from torch._dynamo.utils import clone_inputs

View File

@@ -1,10 +1,11 @@
 # flake8: noqa
+import triton
+from prettytable import PrettyTable
+
 import torch
 import torch._dynamo
 import torch._inductor.config
-import triton
-from prettytable import PrettyTable
 # torch._inductor.config.debug = True
 torch._inductor.config.triton.dense_indexing = True

View File

@@ -1,9 +1,10 @@
+from benchmark_helper import time_with_torch_timer
+
 import torch
 import torch._dynamo
 import torch._dynamo.config
 import torch._inductor.config as config
-from benchmark_helper import time_with_torch_timer
 @torch._dynamo.optimize("inductor", nopython=True)

View File

@@ -1,8 +1,9 @@
 import itertools
+from benchmark_helper import time_with_torch_timer
+
 import torch
 import torch._dynamo
-from benchmark_helper import time_with_torch_timer
 @torch._dynamo.optimize("inductor", nopython=True)

View File

@@ -1,10 +1,11 @@
+import triton
+from benchmark_helper import time_with_torch_timer
+
 import torch
 import torch._dynamo
 import torch._dynamo.config
 import torch._inductor.config as config
-import triton
-from benchmark_helper import time_with_torch_timer
 # The flag below controls whether to allow TF32 on matmul. This flag defaults to True.
 torch.backends.cuda.matmul.allow_tf32 = True

View File

@@ -1,8 +1,9 @@
+from benchmark_helper import time_with_torch_timer
+
 import torch
 import torch._dynamo
 import torch._inductor.config as inductor_config
-from benchmark_helper import time_with_torch_timer
 inductor_config.triton.mm = "triton"

View File

@@ -5,6 +5,7 @@ import sys
 import numpy as np
 import tabulate
+
 import torch
 import torch._inductor

View File

@@ -1,9 +1,10 @@
 #!/usr/bin/env python3
 import click
 import numpy as np
-import torch
 from operator_inp_utils import OperatorInputsLoader
+
+import torch
 from torch._dynamo.backends.cudagraphs import cudagraphs_inner
 from torch._dynamo.testing import same
 from torch._inductor.compile_fx import compile_fx

View File

@@ -47,13 +47,14 @@ import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
-import torch
-import torch._dynamo
 from matplotlib import rcParams
 from scipy.stats import gmean
 from tabulate import tabulate
+
+import torch
+import torch._dynamo
 rcParams.update({"figure.autolayout": True})
 plt.rc("axes", axisbelow=True)

View File

@@ -7,9 +7,10 @@ import subprocess
 import sys
 import warnings
-import torch
 from common import BenchmarkRunner, download_retry_decorator, main
+
+import torch
 from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
 from torch._dynamo.utils import clone_inputs

View File

@@ -10,9 +10,10 @@ import warnings
 from collections import namedtuple
 from os.path import abspath, exists
-import torch
 import yaml
+
+import torch
 try:
     from .common import BenchmarkRunner, main
 except ImportError:

View File

@@ -5,12 +5,13 @@ import sys
 import time
 from datetime import timedelta
+from datasets import load_dataset, load_metric
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
 import torch
 import torch._dynamo
-from datasets import load_dataset, load_metric
 from torch.utils.data import DataLoader
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
 torch.backends.cuda.matmul.allow_tf32 = True

View File

@@ -1,9 +1,10 @@
 from collections import namedtuple
 from functools import partial
-import torch
 import torchvision.models as cnn
+
+import torch
 from .factory import (
     dropoutlstm_creator,
     imagenet_cnn_creator,

View File

@@ -1,4 +1,5 @@
 import pytest
+
 import torch
 from .fuser import set_fuser

View File

@@ -1,6 +1,7 @@
-import torch
 from utils import NUM_LOOP_ITERS
+
+import torch
 def add_tensors_loop(x, y):
     z = torch.add(x, y)

View File

@@ -1,10 +1,10 @@
-import torch
 import torchaudio_models as models
-from torch import nn, Tensor
 from utils import check_for_functorch, extract_weights, GetterReturnType, load_weights
+import torch
+from torch import nn, Tensor
 has_functorch = check_for_functorch()

View File

@@ -1,9 +1,9 @@
+from utils import GetterReturnType
 import torch
 import torch.distributions as dist
 from torch import Tensor
-from utils import GetterReturnType
 def get_simple_regression(device: torch.device) -> GetterReturnType:
     N = 10

View File

@@ -1,11 +1,12 @@
 from typing import cast
-import torch
 import torchvision_models as models
-from torch import Tensor
 from utils import check_for_functorch, extract_weights, GetterReturnType, load_weights
+
+import torch
+from torch import Tensor
 has_functorch = check_for_functorch()

View File

@@ -4,6 +4,7 @@ import sys
 import time
 import click
+
 import torch
 torch.set_num_threads(1)

View File

@@ -6,8 +6,6 @@ import os
 import time
 from typing import Optional, Tuple
-import torch
-import torch._inductor.config
 from mixtral_moe_model import Transformer as MixtralMoE
 from mixtral_moe_quantize import (
     WeightOnlyInt8QuantHandler as MixtralMoEWeightOnlyInt8QuantHandler,
@@ -15,6 +13,9 @@ from mixtral_moe_quantize import (
 from model import Transformer as LLaMA
 from quantize import WeightOnlyInt8QuantHandler as LLaMAWeightOnlyInt8QuantHandler
+
+import torch
+import torch._inductor.config
 torch._inductor.config.coordinate_descent_tuning = True
 torch._inductor.config.triton.unique_kernel_names = True
 torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future

View File

@@ -1,10 +1,10 @@
 # flake8: noqa: E266, C417, B950
+from mixtral_moe_model import ConditionalFeedForward
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from mixtral_moe_model import ConditionalFeedForward
 ##### Quantization Primitives ######

View File

@@ -187,9 +187,10 @@ class BackendWorker:
     def _setup(self):
         import time
-        import torch
         from torchvision.models.resnet import BasicBlock, ResNet
+
+        import torch
         # Create ResNet18 on meta device
         with torch.device("meta"):
             m = ResNet(BasicBlock, [2, 2, 2, 2])

View File

@@ -4,11 +4,11 @@ import shutil
 import textwrap
 from typing import List, Optional, Tuple
-from torch.utils.benchmark.utils.common import _make_temp_dir
 from core.api import GroupedBenchmark, TimerArgs
 from core.types import Definition, FlatIntermediateDefinition, Label
+from torch.utils.benchmark.utils.common import _make_temp_dir
 _TEMPDIR: Optional[str] = None

View File

@@ -7,6 +7,7 @@ from collections import namedtuple
 import benchmark_utils
 import numpy as np
+
 import torch
 # needs to be imported after torch

View File

@@ -2,6 +2,7 @@ import json
 import time
 import benchmark_cpp_extension # noqa: F401
+
 import torch

View File

@@ -1,6 +1,7 @@
 import time
 import numpy as np
+
 import torch
 """Microbenchmarks for Tensor repeat operator. Supports PyTorch."""

View File

@@ -1,4 +1,5 @@
 import operator_benchmark as op_bench
+
 import torch

View File

@@ -1,4 +1,5 @@
 import operator_benchmark as op_bench
+
 import torch
 intraop_bench_configs = op_bench.config_list(

View File

@@ -1,4 +1,5 @@
 import operator_benchmark as op_bench
+
 import torch

View File

@@ -1,4 +1,5 @@
 import operator_benchmark as op_bench
+
 import torch
 """Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,4 +1,5 @@
 import operator_benchmark as op_bench
+
 import torch

View File

@@ -1,4 +1,5 @@
 import operator_benchmark as op_bench
+
 import torch

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,10 +1,9 @@
+import operator_benchmark as op_bench
 import torch
 from torch import nn
 from torch.ao import pruning
-
-import operator_benchmark as op_bench
 """Microbenchmarks for sparsifier."""

View File

@@ -1,9 +1,9 @@
 from typing import List
-import torch
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for as_strided operator"""

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn.functional as F
-
-import operator_benchmark as op_bench
 """Microbenchmarks for batchnorm operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for binary operators."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,10 +1,10 @@
 import random
 from typing import List
-import torch
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for Cat operator"""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for channel_shuffle operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for Chunk operator"""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for ClipRanges operator."""

View File

@@ -1,9 +1,8 @@
-import torch
-import torch.nn as nn
-
 from pt import configs
 import operator_benchmark as op_bench
+import torch
+import torch.nn as nn
 """
 Microbenchmarks for Conv1d and ConvTranspose1d operators.

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for diag operator"""

View File

@@ -1,8 +1,8 @@
 import numpy
-import torch
 from pt import configs
 import operator_benchmark as op_bench
+import torch
 """Embedding and EmbeddingBag Operator Benchmark"""

View File

@@ -1,9 +1,8 @@
+import operator_benchmark as op_bench
 import torch
 from torch.testing._internal.common_device_type import get_all_device_types
-
-import operator_benchmark as op_bench
 """Microbenchmark for Fill_ operator."""
 fill_short_configs = op_bench.config_list(

View File

@@ -1,7 +1,7 @@
 import numpy
-import torch
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for gather operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn.functional as F
-
-import operator_benchmark as op_bench
 """Microbenchmarks for groupnorm operator."""

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn as nn
-
-import operator_benchmark as op_bench
 """
 Microbenchmarks for the hardsigmoid operator.

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn as nn
-
-import operator_benchmark as op_bench
 """
 Microbenchmarks for the hardswish operators.

View File

@@ -1,7 +1,7 @@
 import numpy
-import torch
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for index_select operator."""

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn.functional as F
-
-import operator_benchmark as op_bench
 """Microbenchmarks for instancenorm operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for interpolate operator."""

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn.functional as F
-
-import operator_benchmark as op_bench
 """Microbenchmarks for layernorm operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,9 +1,8 @@
-import torch
-import torch.nn as nn
-
 from pt import configs
 import operator_benchmark as op_bench
+import torch
+import torch.nn as nn
 """Microbenchmarks for Linear operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for MatMul operator"""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """
 Microbenchmarks for batch matrix mult with einsum and torch.bmm.

View File

@@ -1,9 +1,9 @@
 import math
-import torch
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for torch.nan_to_num / nan_to_num_ operators"""

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn as nn
-
-import operator_benchmark as op_bench
 """
 Microbenchmarks for MaxPool1d and AvgPool1d operators.
 """

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.ao.nn.quantized.functional as qF
-
-import operator_benchmark as op_bench
 r"""Microbenchmarks for the quantized activations."""
 qactivation_long_configs = op_bench.cross_product_configs(

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 from torch._ops import ops
-
-import operator_benchmark as op_bench
 qarithmetic_binary_configs = op_bench.cross_product_configs(
     N=(2, 8, 64, 512),
     dtype=(torch.quint8, torch.qint8, torch.qint32),

View File

@@ -1,10 +1,10 @@
 import numpy
-import torch
-import torch.ao.nn.qat as nnqat
 from pt import configs
-from torch.ao.quantization import default_embedding_qat_qconfig
 import operator_benchmark as op_bench
+import torch
+import torch.ao.nn.qat as nnqat
+from torch.ao.quantization import default_embedding_qat_qconfig
 """
 Microbenchmarks for QAT Embedding + EmbeddingBag operators.

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for quantized batchnorm operator."""

View File

@@ -1,10 +1,10 @@
 from typing import List
+import operator_benchmark as op_bench
 import torch
 import torch.ao.nn.quantized as nnq
-
-import operator_benchmark as op_bench
 """Microbenchmarks for quantized Cat operator"""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 qcomparators_configs = op_bench.cross_product_configs(
     N=(8, 64),

View File

@@ -1,9 +1,8 @@
-import torch
-import torch.ao.nn.quantized as nnq
-
 from pt import configs
 import operator_benchmark as op_bench
+import torch
+import torch.ao.nn.quantized as nnq
 """
 Microbenchmarks for qConv operators.

View File

@@ -1,12 +1,12 @@
 from typing import Optional
 import numpy as np
+import operator_benchmark as op_bench
 import torch
 from torch.testing._internal.common_quantization import lengths_to_offsets
-
-import operator_benchmark as op_bench
 torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 embeddingbag_conversion_short_configs = op_bench.cross_product_configs(
     num_embeddings=(80,), embedding_dim=(128, 256, 512), tags=("short",)

View File

@@ -1,9 +1,9 @@
 import numpy
-import torch
-import torch.ao.nn.quantized as nnq
 from pt import configs
 import operator_benchmark as op_bench
+import torch
+import torch.ao.nn.quantized as nnq
 """
 Microbenchmarks for qEmbeddingBag operators.

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for quantized groupnorm operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for quantized instancenorm operator."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for the quantized interpolate op.

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for quantized layernorm operator."""

View File

@@ -1,10 +1,9 @@
-import torch
-import torch.ao.nn.quantized as nnq
-import torch.ao.nn.quantized.dynamic as nnqd
-
 from pt import configs
 import operator_benchmark as op_bench
+import torch
+import torch.ao.nn.quantized as nnq
+import torch.ao.nn.quantized.dynamic as nnqd
 """
 Microbenchmarks for Quantized Linear operators.

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.ao.quantization.observer as obs
-
-import operator_benchmark as op_bench
 qobserver_short_configs_dict = {
     "attr_names": ("C", "M", "N", "dtype", "device"),
     "attrs": (

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 # 2D pooling will have input matrix of rank 3 or 4
 qpool2d_long_configs = op_bench.config_list(

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 from torch import nn
-
-import operator_benchmark as op_bench
 """
 Microbenchmarks for RNNs.
 """

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 # Configs for pointwise and reduction unary ops
 qmethods_configs_short = op_bench.config_list(

View File

@@ -1,10 +1,9 @@
+import operator_benchmark as op_bench
 import torch
 import torch.ao.nn.quantized as nnq
 import torch.ao.quantization as tq
 import torch.nn as nn
-
-import operator_benchmark as op_bench
 """Microbenchmarks for general quantization operations."""
 # mode is used to show the direction of the benchmark:
View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for quantized unary operators (point-wise and reduction)."""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for remainder operators."""

View File

@@ -1,8 +1,7 @@
+import operator_benchmark as op_bench
 import torch
 import torch.nn as nn
-
-import operator_benchmark as op_bench
 """
 Microbenchmarks for the softmax operators.
View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for Split operator"""

View File

@@ -1,10 +1,10 @@
 import random
 from typing import List
-import torch
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for Stack operator"""

View File

@@ -1,6 +1,5 @@
-import torch
-
 import operator_benchmark as op_bench
+import torch
 """Microbenchmarks for sum reduction operator."""

Some files were not shown because too many files have changed in this diff.