Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 12:54:11 +08:00
[BE][Easy][3/19] enforce style for empty lines in import segments in benchmarks/ (#129754)

See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter. You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129754
Approved by: https://github.com/ezyang
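The convention being enforced, as the hunks below illustrate, separates import segments (standard library, then third-party, then first-party `torch` and local modules) with single blank lines, and puts two blank lines between the import block and the first module-level statement. A minimal before/after sketch; the file labels and the `SEED` constant are hypothetical, and the exact segment rules come from the repository's import linter, so treat this as illustrative rather than normative:

```python
# before.py -- segments run together, one blank line before code
import sys
import numpy as np
import torch
SEED = 0

# after.py -- one blank line between segments (stdlib, third-party,
# torch), two blank lines before the first module-level statement
import sys

import numpy as np

import torch


SEED = 0
```

Because most of the auto-generated hunks only add or remove blank lines, the `git diff --ignore-all-space --ignore-blank-lines` command above should show little or nothing for them, which is what makes these PRs quick to review.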
committed by PyTorch MergeBot
parent 32995dec28
commit c0ed38e644
```diff
@@ -2,6 +2,7 @@ import argparse
import json
from collections import namedtuple


Result = namedtuple("Result", ["name", "base_time", "diff_time"])

@@ -1,3 +1,4 @@
from .DummyData import DummyData


data_map = {"DummyData": DummyData}

@@ -1,3 +1,4 @@
from .DummyModel import DummyModel


model_map = {"DummyModel": DummyModel}

@@ -1,5 +1,6 @@
from .server import AverageBatchParameterServer, AverageParameterServer


server_map = {
    "AverageParameterServer": AverageParameterServer,
    "AverageBatchParameterServer": AverageBatchParameterServer,

@@ -6,6 +6,7 @@ from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer


criterion_map = {"cel": cel}

ddp_hook_map = {

@@ -1,5 +1,6 @@
import torch


RPC_SPARSE = "rpc_sparse"
RPC_DENSE = "rpc_dense"

@@ -1,13 +1,13 @@
import time

import numpy as np

from agent import AgentBase
from observer import ObserverBase

import torch
import torch.distributed.rpc as rpc


COORDINATOR_NAME = "coordinator"
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"

@@ -1,5 +1,4 @@
import argparse

import json
import os
import time

@@ -9,6 +8,7 @@ from coordinator import CoordinatorBase
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp


COORDINATOR_NAME = "coordinator"
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"

@@ -1,8 +1,8 @@
#!/usr/bin/env python3

import argparse
import os
import sys

from typing import Set

@@ -30,6 +30,7 @@ from zipfile import ZipFile
import pandas as pd
import requests


# Note: the public query url targets this rockset lambda:
# https://console.rockset.com/lambdas/details/commons.artifacts
ARTIFACTS_QUERY_URL = "https://api.usw2a1.rockset.com/v1/public/shared_lambdas/4ca0033e-0117-41f5-b043-59cde19eff35"

@@ -6,6 +6,7 @@ import csv
import sys
from collections import defaultdict


assert len(sys.argv) == 3

RESULTS = defaultdict(dict)

@@ -1,4 +1,5 @@
#!/usr/bin/env python3

from __future__ import annotations

import abc

@@ -15,6 +15,7 @@ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy


try:
    from .torchbench import setup_torchbench_cwd
except ImportError:

@@ -10,6 +10,7 @@ from torch._dynamo.testing import reduce_to_scalar_loss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.profiler import profile, ProfilerActivity, record_function


try:
    from .common import timed
    from .dist_util import apply_fsdp, cleanup, get_model, model_iter_fn, setup

@@ -1,4 +1,5 @@
#!/usr/bin/env python3

import importlib
import logging
import os

@@ -7,16 +8,17 @@ import subprocess
import sys
import warnings


try:
    from .common import BenchmarkRunner, download_retry_decorator, main, reset_rng_state
except ImportError:
    from common import BenchmarkRunner, download_retry_decorator, main, reset_rng_state

import torch

from torch._dynamo.testing import collect_results
from torch._dynamo.utils import clone_inputs


log = logging.getLogger(__name__)

# Enable FX graph caching

@@ -1,12 +1,13 @@
# flake8: noqa

import triton
from prettytable import PrettyTable

import torch

import torch._dynamo
import torch._inductor.config


# torch._inductor.config.debug = True
torch._inductor.config.triton.dense_indexing = True
torch.manual_seed(0)

@@ -2,6 +2,7 @@ import timeit

import torch.fx


N = 100000
K = 1000

@@ -1,7 +1,6 @@
from benchmark_helper import time_with_torch_timer

import torch

import torch._dynamo
import torch._dynamo.config
import torch._inductor.config as config

@@ -2,11 +2,11 @@ import triton
from benchmark_helper import time_with_torch_timer

import torch

import torch._dynamo
import torch._dynamo.config
import torch._inductor.config as config


# The flag below controls whether to allow TF32 on matmul. This flag defaults to True.
torch.backends.cuda.matmul.allow_tf32 = True
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.

@@ -1,10 +1,10 @@
from benchmark_helper import time_with_torch_timer

import torch

import torch._dynamo
import torch._inductor.config as inductor_config


inductor_config.triton.mm = "triton"

@@ -1,4 +1,5 @@
#!/usr/bin/env python3

import argparse
import inspect
import sys

@@ -7,13 +8,13 @@ import numpy as np
import tabulate

import torch

import torch._inductor
from torch._dynamo.backends.cudagraphs import cudagraphs_inner
from torch._dynamo.testing import same
from torch._inductor.compile_fx import compile_fx
from torch._inductor.utils import timed


aten = torch.ops.aten

try:

@@ -12,6 +12,7 @@ from torch.utils import _pytree as pytree
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map


log = logging.getLogger(__name__)

OP_INP_DIRECTORY = os.path.join(os.path.dirname(__file__), "operator_inp_logs")

@@ -1,10 +1,10 @@
#!/usr/bin/env python3

import click
import numpy as np
from operator_inp_utils import OperatorInputsLoader

import torch

from torch._dynamo.backends.cudagraphs import cudagraphs_inner
from torch._dynamo.testing import same
from torch._inductor.compile_fx import compile_fx

@@ -13,6 +13,7 @@ from torch._inductor.lowering import lowerings
from torch._inductor.utils import gen_gm_and_inputs
from torch.utils._pytree import tree_map_only


aten = torch.ops.aten

@@ -3,6 +3,7 @@ import os
import re
import sys


# This script takes the logs produced by the benchmark scripts (e.g.,
# torchbench.py) and parses it into a CSV file that summarizes what
# is failing and why. It is kept separate from the benchmark script

@@ -23,7 +23,6 @@ If you want to test float16

"""


import argparse
import dataclasses
import functools

@@ -44,7 +43,6 @@ from os.path import abspath, exists
from random import randint

import matplotlib.pyplot as plt

import numpy as np
import pandas as pd
from matplotlib import rcParams

@@ -52,9 +50,9 @@ from scipy.stats import gmean
from tabulate import tabulate

import torch

import torch._dynamo


rcParams.update({"figure.autolayout": True})
plt.rc("axes", axisbelow=True)

@@ -2,9 +2,9 @@ import os
import unittest

from .common import parse_args, run

from .torchbench import setup_torchbench_cwd, TorchBenchmarkRunner


try:
    # fbcode only
    from aiplatform.utils.sanitizer_status import is_asan_or_tsan

@@ -1,4 +1,5 @@
#!/usr/bin/env python3

import importlib
import logging
import os

@@ -7,16 +8,17 @@ import subprocess
import sys
import warnings


try:
    from .common import BenchmarkRunner, download_retry_decorator, main
except ImportError:
    from common import BenchmarkRunner, download_retry_decorator, main

import torch

from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
from torch._dynamo.utils import clone_inputs


# Enable FX graph caching
if "TORCHINDUCTOR_FX_GRAPH_CACHE" not in os.environ:
    torch._inductor.config.fx_graph_cache = True

@@ -1,4 +1,5 @@
#!/usr/bin/env python3

import functools
import gc
import importlib

@@ -14,6 +15,7 @@ import yaml

import torch


try:
    from .common import BenchmarkRunner, main
except ImportError:

@@ -22,6 +24,7 @@ except ImportError:
from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
from torch._dynamo.utils import clone_inputs


# We are primarily interested in tf32 datatype
torch.backends.cuda.matmul.allow_tf32 = True

@@ -9,10 +9,10 @@ from datasets import load_dataset, load_metric
from transformers import AutoModelForSequenceClassification, AutoTokenizer

import torch

import torch._dynamo
from torch.utils.data import DataLoader


torch.backends.cuda.matmul.allow_tf32 = True

# You will download around 84G dataset if you run this end to end training/evaluation example.

@@ -1,6 +1,7 @@
from .cells import *  # noqa: F403
from .factory import *  # noqa: F403


# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2

@@ -1,5 +1,6 @@
import pytest  # noqa: F401


default_rnns = [
    "cudnn",
    "aten",

@@ -9,6 +9,7 @@ import torch.nn as nn
from torch import Tensor
from torch.nn import Parameter


"""
Some helper classes for writing custom TorchScript LSTMs.

@@ -45,6 +45,7 @@ recurrent_scaleshift.graph_for(x, scale, shift)

import torch


x = torch.tensor([])
x.requires_grad = True
x.mean().backward()  # no error triggered

@@ -1,10 +1,11 @@
import argparse

from pt_wrapper_module import WrapperModule

from SimpleAddModule import add_tensors_loop, SimpleAddModule

from utils import benchmark_module, BenchmarkConfig, ModuleConfig, ms_to_us


""" Framework overhead benchmark script.
Benchmark framework overhead.
Currently supported ops: add.

@@ -3,6 +3,7 @@ from collections import namedtuple

from torch.utils import ThroughputBenchmark


NUM_LOOP_ITERS = 1000
BenchmarkConfig = namedtuple("BenchmarkConfig", "num_warmup_iters num_iters")
ModuleConfig = namedtuple("ModuleConfig", "pt_fn c2_op num_params graph_mode")

@@ -1,5 +1,4 @@
import torchaudio_models as models

from utils import check_for_functorch, extract_weights, GetterReturnType, load_weights

import torch

@@ -6,6 +6,7 @@ from typing import Any, Callable, List, NamedTuple
import torch
from torch.autograd import functional


try:
    import functorch as ft

@@ -9,6 +9,7 @@ import torch
import torch.nn.functional as F
from torch import nn, Tensor


__all__ = ["Wav2Letter"]

@@ -4,10 +4,10 @@ from collections import OrderedDict

import torch
from torch import nn

from torch.jit.annotations import Dict
from torch.nn import functional as F


try:
    from scipy.optimize import linear_sum_assignment

@@ -2,9 +2,9 @@ from collections import defaultdict
from typing import Callable, Dict, List, Tuple, Union

import torch

from torch import nn, Tensor


# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
# A Getter takes in a device and returns a callable and the inputs to that callable

@@ -1,12 +1,12 @@
from typing import cast

import torchvision_models as models

from utils import check_for_functorch, extract_weights, GetterReturnType, load_weights

import torch
from torch import Tensor


has_functorch = check_for_functorch()

@@ -1,5 +1,6 @@
import pandas


df = pandas.read_csv("perf.csv")

ops = pandas.unique(df["operator"])

@@ -11,6 +12,7 @@ pivot_speedups = (pivot_op_shape.T / pivot_op_shape["eager"]).T

import matplotlib.pyplot as plt


plt.rcParams["figure.figsize"] = (20, 100)
fig, axs = plt.subplots(nops)
plt.subplots_adjust(hspace=0.5)

@@ -7,6 +7,7 @@ import click

import torch


torch.set_num_threads(1)
torch._C._debug_set_fusion_group_inlining(False)

@@ -10,6 +10,7 @@ import torch
import torch.nn as nn
from torch.utils.flop_counter import FlopCounterMode


WARMUP_ITER = 5

A100_40G_BF16_TFLOPS = 312

@@ -14,6 +14,7 @@ from quantize import WeightOnlyInt8QuantHandler as LLaMAWeightOnlyInt8QuantHandl
import torch
import torch._inductor.config


torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True  # Experimental feature to reduce compilation times, will be on by default in future

@@ -5,6 +5,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F


##### Quantization Primitives ######

@@ -10,6 +10,7 @@ import os

import pandas as pd


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Parse output files")
    parser.add_argument("--csv", type=str, help="Path to csv file")

@@ -1,5 +1,4 @@
import argparse

import asyncio
import os.path
import subprocess

@@ -8,6 +8,7 @@ from typing import Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union

from worker.main import WorkerTimerArgs


if TYPE_CHECKING:
    # Benchmark utils are only partially strict compliant, so MyPy won't follow
    # imports using the public namespace. (Due to an exclusion rule in

@@ -13,6 +13,7 @@ from typing import List, Optional, Tuple, TYPE_CHECKING

import torch


if TYPE_CHECKING:
    # See the note in api.py for why this is necessary.
    from torch.utils.benchmark.utils.timer import Language

@@ -20,6 +20,7 @@ from worker.main import (
    WorkerUnpickler,
)


if TYPE_CHECKING:
    PopenType = subprocess.Popen[bytes]
else:

@@ -1,4 +1,4 @@
from pt import (  # noqa: F401 # noqa: F401
from pt import (  # noqa: F401
    add_test,
    ao_sparsifier_test,
    as_strided_test,

@@ -31,5 +31,6 @@ from pt import ( # noqa: F401 # noqa: F401

import operator_benchmark as op_bench


if __name__ == "__main__":
    op_bench.benchmark_runner.main()

@@ -4,5 +4,6 @@ from pt import unary_test # noqa: F401

import operator_benchmark as op_bench


if __name__ == "__main__":
    op_bench.benchmark_runner.main()

@@ -6,6 +6,7 @@ import timeit
from collections import namedtuple

import benchmark_utils

import numpy as np

import torch

@@ -1,10 +1,12 @@
import argparse

import benchmark_core

import benchmark_utils

import torch


"""Performance microbenchmarks's main binary.

This is the main function for running performance microbenchmark tests.

@@ -4,6 +4,7 @@ import numpy as np

import torch


"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""

input_shapes = (

@@ -2,6 +2,7 @@ import operator_benchmark as op_bench

import torch


intraop_bench_configs = op_bench.config_list(
    attrs=[
        [8, 16],

@@ -2,6 +2,7 @@ import operator_benchmark as op_bench

import torch


"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""

add_short_configs = op_bench.config_list(

@@ -2,4 +2,5 @@
import benchmark_runner  # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase  # noqa: F401
from benchmark_test_generator import *  # noqa: F401,F403

from benchmark_utils import *  # noqa: F401,F403

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""

# Configs for PT add operator

@@ -1,7 +1,7 @@
import operator_benchmark as op_bench

import torch
from torch import nn

from torch.ao import pruning

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
import torch.nn.functional as F

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,5 +1,6 @@
import operator_benchmark as op_bench


"""
Configs shared by multiple benchmarks
"""

@@ -1,9 +1,11 @@
from pt import configs

import operator_benchmark as op_bench

import torch
import torch.nn as nn


"""
Microbenchmarks for Conv1d and ConvTranspose1d operators.
"""

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -2,8 +2,10 @@ import numpy
from pt import configs

import operator_benchmark as op_bench

import torch


"""Embedding and EmbeddingBag Operator Benchmark"""

@@ -1,8 +1,9 @@
import operator_benchmark as op_bench
import torch

import torch
from torch.testing._internal.common_device_type import get_all_device_types


"""Microbenchmark for Fill_ operator."""

fill_short_configs = op_bench.config_list(

@@ -1,6 +1,7 @@
import numpy

import operator_benchmark as op_bench

import torch

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
import torch.nn.functional as F

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
import torch.nn as nn

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
import torch.nn as nn

@@ -1,6 +1,7 @@
import numpy

import operator_benchmark as op_bench

import torch

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
import torch.nn.functional as F

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for interpolate operator."""

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
import torch.nn.functional as F

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""

# Configs for PT linear_prepack_fp16 operator

@@ -1,6 +1,7 @@
from pt import configs

import operator_benchmark as op_bench

import torch
import torch.nn as nn

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""

# Configs for PT linear_unpack_fp16 operator

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for MatMul operator"""

# Configs for PT Matmul operator

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""
Microbenchmarks for batch matrix mult with einsum and torch.bmm.
"""

@@ -1,7 +1,9 @@
import operator_benchmark as op_bench

import torch
import torch.nn as nn


"""
Microbenchmarks for MaxPool1d and AvgPool1d operators.
"""

@@ -1,7 +1,9 @@
import operator_benchmark as op_bench

import torch
import torch.ao.nn.quantized.functional as qF


r"""Microbenchmarks for the quantized activations."""

qactivation_long_configs = op_bench.cross_product_configs(

@@ -1,7 +1,9 @@
import operator_benchmark as op_bench

import torch
from torch._ops import ops


qarithmetic_binary_configs = op_bench.cross_product_configs(
    N=(2, 8, 64, 512),
    dtype=(torch.quint8, torch.qint8, torch.qint32),

@@ -2,10 +2,12 @@ import numpy
from pt import configs

import operator_benchmark as op_bench

import torch
import torch.ao.nn.qat as nnqat
from torch.ao.quantization import default_embedding_qat_qconfig


"""
Microbenchmarks for QAT Embedding + EmbeddingBag operators.
"""

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


qcomparators_configs = op_bench.cross_product_configs(
    N=(8, 64),
    dtype=(torch.quint8, torch.qint8, torch.qint32),

@@ -1,9 +1,11 @@
from pt import configs

import operator_benchmark as op_bench

import torch
import torch.ao.nn.quantized as nnq


"""
Microbenchmarks for qConv operators.
"""

@@ -3,10 +3,11 @@ from typing import Optional
import numpy as np

import operator_benchmark as op_bench
import torch

import torch
from torch.testing._internal.common_quantization import lengths_to_offsets


torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


embeddingbag_conversion_short_configs = op_bench.cross_product_configs(
    num_embeddings=(80,), embedding_dim=(128, 256, 512), tags=("short",)
)

@@ -2,9 +2,11 @@ import numpy
from pt import configs

import operator_benchmark as op_bench

import torch
import torch.ao.nn.quantized as nnq


"""
Microbenchmarks for qEmbeddingBag operators.
"""

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch

@@ -1,6 +1,8 @@
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for the quantized interpolate op.

Note: We are not benchmarking `upsample` as it is being deprecated, and calls

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench

import torch
```
Some files were not shown because too many files have changed in this diff.