pytorch/benchmarks/cpp/nvfuser/timm.cpp
jjsjann123 7b419e8513 [NVFuser] Upstream push 1026 (#87779)
Syncing nvfuser devel branch to upstream master. https://github.com/csarofeen/pytorch/

Codegen changes include:

* codegen improvements:
    i. allow non-root trivial reductions and empty/no-op fusions
    ii. fix vectorization checks and size calculation
    iii. improve bank conflict handling
    iv. enable the transpose scheduler

* misc:
    i. CI test failure fixes
    ii. cpp test file cleanup
    iii. trivial forwarding support in the codegen runtime (see the sketch below)
    iv. factory method support in codegen
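
As a rough sketch of the empty/no-op fusion and trivial forwarding support (an illustration using the fusion-definition APIs exercised in the file below, not code from this PR):

```
// A fusion whose output is just a copy of its input. Empty/no-op fusion
// support lets the runtime accept such fusions, and trivial forwarding
// lets it hand the input buffer through instead of launching a copy kernel.
Fusion fusion;
FusionGuard fg(&fusion);
auto tv0 = makeContigTensor(2, DataType::Float);
fusion.addInput(tv0);
auto tv1 = set(tv0); // pure copy; a trivial-forwarding candidate
fusion.addOutput(tv1);
```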

Commits in this PR from the devel branch:

```
7117a7e37ebec372d9e802fdfb8abb7786960f4a patching nvfuser conv cudnn test numerics mismatch (#2048)
65af1a4e7013f070df1ba33701f2d524de79d096 Inserting sync for redundant parallel types is already done at the (#2023)
6ac74d181689c8f135f60bfc1ec139d88941c98c Fix sync map (#2047)
f5bca333355e2c0033523f3402de5b8aac602c00 Bank conflict checker improvements (#2032)
d2ca7e3fd203537946be3f7b435303c60fa7f51e Minor update on cp.async code generation. (#1901)
d36cf61f5570c9c992a748126287c4e7432228e0 Test file cleanup (#2040)
0b8e83f49c2ea9f04a4aad5061c1e7f4268474c6 Allow non-root trivial reductions (#2037)
a2dfe40b27cd3f5c04207596f0a1818fbd5e5439 Fix vectorize size calculation (#2035)
e040676a317fe34ea5875276270c7be88f6eaa56 Use withPredicate to replace setPredicate to maintain Exprs immutable (#2025)
197221b847ad5eb347d7ec1cf2706733aacbf97c removing ci workflow (#2034)
40e2703d00795526e7855860aa00b9ab7160755f Reduction rand like patch (#2031)
bc772661cbdb3b711d8e9854ae9b8b7052e3e4a3 Add utility for checking bank conflict of shared memory (#2029)
ddd1cf7695f3fb172a0e4bcb8e4004573617a037 Add back FusionReductionWithTrivialReduction_CUDA (#2030)
fbd97e5ef15fa0f7573800e6fbb5743463fd9e57 Revert "Cleanup trivial reduction workarounds (#2006)" (#2024)
bca20c1dfb8aa8d881fc7973e7579ce82bc6a894 Cleanup trivial reduction workarounds (#2006)
e4b65850eee1d70084105bb6e1f290651adde23e Trivial forwarding (#1995)
1a0e355b5027ed0df501989194ee8f2be3fdd37a Fix contiguity analysis of predicates to match updated contiguity. (#1991)
a4effa6a5f7066647519dc56e854f4c8a2efd2a7 Enable output allocation cache (#2010)
35440b7953ed8da164a5fb28f87d7fd760ac5e00 Patching bn inference (#2016)
0f9f0b4060dc8ca18dc65779cfd7e0776b6b38e8 Add matmul benchmark (#2007)
45045cd05ea268f510587321dbcc8d7c2977cdab Enable tests previously disabled due to an aliasing bug (#2005)
967aa77d2c8e360c7c01587522eec1c1d377c87e Contiguous indexing for View operations (#1990)
a43cb20f48943595894e345865bc1eabf58a5b48 Make inlining even more modular (#2004)
dc458358c0ac91dfaf4e6655a9b3fc206fc0c897 Test util cleanup (#2003)
3ca21ebe4d213f0070ffdfa4ae5d7f6cb0b8e870 More strict validation (#2000)
a7a7d573310c4707a9f381831d3114210461af01 Fix build problem (#1999)
fc235b064e27921fa9d6dbb9dc7055e5bae1c222 Just fixes comments (#1998)
482386c0509fee6edb2964c5ae72074791f3e43a cleanup (#1997)
4cbe0db6558a82c3097d281eec9c85ad2ea0893a Improve divisible split detection (#1970)
42ccc52bdc18bab0330f4b93ed1399164e2980c9 Minor build fix. (#1996)
fcf8c091f72d46f3055975a35afd06263324ede6 Cleanup of lower_utils.cpp: Isolate out GpuLower usage (#1989)
15f2f6dba8cbf408ec93c344767c1862c30f7ecc Move ConcretizedBroadcastDomains to shared_ptr in GpuLower. (#1988)
8f1c7f52679a3ad6acfd419d28a2f4be4a7d89e2 Minor cleanup lower_unroll.cpp (#1994)
1d9858c80319ca7f0037db7de5f04e47f540d76c Minor cleanup (#1992)
f262d9cab59f41c669f53799c6d4a6b9fc4267eb Add support for uniform RNG (#1986)
eb1dad10c73f855eb1ecb20a8b1f7b6edb0c9ea3 Remove non-const functions, remove GpuLower instance on build, pass in ca_map. (#1987)
634820c5e3586c0fe44132c51179b3155be18072 Add support for some empty fusion (#1981)
eabe8d844ad765ee4973faa4821d451ef71b83c3 Segment self mapping fusions (#1954)
e96aacfd9cf9b3c6d08f120282762489bdf540c8 Enable Transpose operation (#1882)
425dce2777420248e9f08893765b5402644f4161 Add a null scheduler that helps segmenting away no-op schedules (#1835)
306d4a68f127dd1b854b749855e48ba23444ba60 Fix canScheduleCompileTime check of transpose scheduler (#1969)
b1bd32cc1b2ae7bbd44701477bddbcfa6642a9be Minor fix (#1967)
bd93578143c1763c1e00ba613a017f8130a6b989 Enable transpose scheduler (#1927)
b7a206e93b4ac823c791c87f12859cf7af264a4c Move scheduler vectorize utilities into their own file (#1959)
d9420e4ca090489bf210e68e9912bb059b895baf View scheduling (#1928)
c668e13aea0cf21d40f95b48e0163b812712cdf2 Upstream push ci fixes (#1965)
c40202bb40ce955955bb97b12762ef3b6b612997 Fix dump effective bandwidth (#1962)
93505bcbb90a7849bd67090fe5708d867e8909e4 WAR on index mapping when exact and permissive maps differ (#1960)
45e95fd1d3c773ee9b2a21d79624c279d269da9f Allow splitting inner-most ID to create virtual innermost ID in transpose scheduler (#1930)
a3ecb339442131f87842eb56955e4f17c544e99f Improve the comments at the beginning of index_compute.h (#1946)
f7bc3417cc2923a635042cc6cc361b2f344248d6 Remove unused variables (#1955)
df3393adbb5cb0309d091f358cfa98706bd4d313 Some cleanup (#1957)
7d1d7c8724ab5a226fad0f5a80feeac04975a496 TVDomainGuard factory (#1953)
357ba224c0fb41ed3e4e8594d95599c973f4a0ca Fill allocation with nan on tests (#1956)
8eafc54685d406f5ac527bcbacc475fda4492d7a Fix detection of unmappable root domains (#1952)
90a51f282601ba8ebd4c84b9334efd7762a234bc Some indexing cleanups, Add eye support (#1940)
ddc01e4e16428aec92f9c84d698f959b6436a971 Exclude unsupported data types (#1951)
992e17c0688fe690c51b50e81a75803621b7e6aa test the groups the same order as they are merged (#1949)
208262b75d1fed0597a0329d61d57bc8bcd7ff14 Move detection of self mapping IDs to IterDomainGraph from (#1941)
ac4de38c6ee53b366e85fdfe408c3642d32b57df Merge pull request #1945 from csarofeen/master_merge_0828
631094891a96f715d8c9925fb73d41013ca7f2e3 Add full, full_like, zeros, zeros_like, ones, ones_like (#1943)
aab10bce4541204c46b91ff0f0ed9878aec1bfc4 Merge remote-tracking branch 'upstream/viable/strict' into HEAD
4c254c063bb55887b45677e3812357556a7aa80d Fix arange when step is negative (#1942)
89330aa23aa804340b2406ab58899d816e3dc3d2 Tensor factories must set the output shape as its input (#1939)
```

RUN_TORCHBENCH: nvfuser

Differential Revision: [D40869846](https://our.internmc.facebook.com/intern/diff/D40869846)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/87779
Approved by: https://github.com/davidberard98
2022-11-04 20:04:34 +00:00


#include <torch/csrc/jit/codegen/cuda/executor.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
#include <torch/csrc/jit/codegen/cuda/ir_all_nodes.h>
#include <torch/csrc/jit/codegen/cuda/ir_builder.h>
#include <torch/csrc/jit/codegen/cuda/scheduler/all_schedulers.h>
#include <benchmark/benchmark.h>
#include <benchmarks/cpp/nvfuser/utils.h>
using namespace torch::jit::fuser::cuda;
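
// The benchmarks in this file replay fusion patterns captured from TIMM
// models; each setup_* function builds the fusion IR, and the paired
// NvFuserScheduler_* function feeds it ATen inputs through a
// FusionExecutorCache.

// A reading of the ops below (not a label from the original authors): this
// fusion computes the grad_weight/grad_bias-style reductions of a LayerNorm
// backward - t11 = (x - mean) * invstd is the normalized input, and the
// outputs are sum(grad * normalized) and sum(grad) over the outer two
// dimensions.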
static void setup_vit_base_patch16_224_bcast7(Fusion* fusion, void* null) {
  FusionGuard fg(fusion);

  auto t2 = makeContigTensor(3, DataType::Float);
  auto t3 = TensorViewBuilder()
                .shape({-1, -1, 1})
                .dtype(DataType::Float)
                .contiguity({true, true, false})
                .build();
  auto t4 = TensorViewBuilder()
                .shape({-1, -1, 1})
                .dtype(DataType::Float)
                .contiguity({true, true, false})
                .build();
  auto t7 = makeContigTensor(3, DataType::Half);

  fusion->addInput(t2);
  fusion->addInput(t3);
  fusion->addInput(t4);
  fusion->addInput(t7);

  auto t8 = castOp(DataType::Float, t7);
  auto t9 = set(t8);
  auto t10 = sub(t2, t3);
  auto t11 = mul(t10, t4);
  auto t25 = mul(t9, t11);
  auto t26 = sum(t25, {0, 1});
  auto t36 = set(t26);
  auto t27 = sum(t9, {0, 1});
  auto t37 = set(t27);
  auto t39 = castOp(DataType::Half, t11);

  fusion->addOutput(t36);
  fusion->addOutput(t37);
  fusion->addOutput(t39);
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_bcast7(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
  auto fp32_options =
      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);

  auto t2 = at::randn(input_shape, fp32_options);
  auto t3 = at::randn({input_shape[0], input_shape[1], 1}, fp32_options);
  auto t4 = at::randn({input_shape[0], input_shape[1], 1}, fp32_options);
  auto t7 = at::randn(input_shape, fp16_options);

  std::vector<c10::IValue> aten_inputs({t2, t3, t4, t7});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - float + half x2 - t2, t7, t39
  // Inner-most dimension only - float x2 - t36, t37
  // Outer two dimensions only - float x2 - t3, t4
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) *
      // Per-iteration bytes, grouped so the iteration count scales every
      // term: t2 + t7, then t3 + t4, then t36 + t37, then t39.
      (t2.numel() * (4 + 2) + t3.numel() * 4 * 2 + input_shape[2] * (4 * 2) +
       t2.numel() * 2));
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast7,
    setup_vit_base_patch16_224_bcast7,
    NvFuserScheduler_TIMM_vit_base_patch16_224_bcast7,
    nullptr);

// pwise case, broadcasting both sides
NVFUSER_BENCHMARK_RUN(NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast7)
    ->Args({64, 197, 768})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
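
// A reading of the ops below (not a label from the original authors):
// bias-add + dropout + residual-add followed by a LayerNorm forward (mean,
// variance, rsqrt, scale, shift). The d34/d40 scalar arithmetic is an
// inlined dropout with keep-probability 1.0, so the mask is effectively
// always true and the scale is 1.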
static void setup_vit_base_patch16_224_bcast5(Fusion* fusion, void* null) {
  FusionGuard fg(fusion);

  auto t2 = makeContigTensor(3, DataType::Float);
  auto t5 = makeContigTensor(1, DataType::Float);
  auto t3 = makeContigTensor(3, DataType::Half);
  auto t0 = makeContigTensor(1, DataType::Float);
  auto t1 = makeContigTensor(1, DataType::Float);

  fusion->addInput(t2);
  fusion->addInput(t5);
  fusion->addInput(t3);
  fusion->addInput(t0);
  fusion->addInput(t1);

  std::vector<bool> bcast_pattern0({true, true, false});
  std::vector<bool> bcast_pattern1({false, false, true});

  auto t4 = castOp(DataType::Float, t3);
  auto t6 = set(t5);
  auto t7 = broadcast(t6, bcast_pattern0);
  auto t8 = add(t4, t7);
  auto t9 = rand_like(t8);
  auto d34 =
      sub(IrBuilder::create<Double>(1.0), IrBuilder::create<Double>(0.0));
  auto t10 = lt(t9, d34);
  auto t11 = castOp(DataType::Float, t10);
  auto t12 = mul(t8, t11);
  auto b36 = eq(d34, IrBuilder::create<Double>(0.0));
  auto d37 = castOp(DataType::Double, b36);
  auto d38 = add(d37, d34);
  auto d40 = div(IrBuilder::create<Double>(1.0), d38);
  auto t13 = mul(t12, d40);
  auto t14 = set(t13);
  auto t15 = add(t2, t14);
  auto t16 = set(t15);
  auto t36 = sum(t16, {2});
  auto d151 = castOp(DataType::Double, t2->axis(2)->extent());
  auto d152 = mul(IrBuilder::create<Double>(1.0), d151);
  auto t19 = div(t36, d152);
  auto t22 = broadcast(t19, bcast_pattern1);
  auto t23 = sub(t16, t22);
  auto t37 = mul(t23, t23);
  auto t20 = sum(t37, {2});
  auto t24 = broadcast(t20, bcast_pattern1);
  auto d95 = castOp(DataType::Double, t2->axis(2)->extent());
  auto d105 = reciprocal(d95);
  auto t25 = mul(t24, d105);
  auto t26 = add(t25, IrBuilder::create<Double>(1e-6));
  auto t27 = rsqrt(t26);
  auto t28 = mul(t23, t27);
  auto t17 = set(t1);
  auto t29 = broadcast(t17, bcast_pattern0);
  auto t30 = mul(t28, t29);
  auto t18 = set(t0);
  auto t31 = broadcast(t18, bcast_pattern0);
  auto t32 = add(t30, t31);
  auto t33 = set(t32);
  auto t34 = castOp(DataType::Half, t33);

  fusion->addOutput(t16); // full 3d float
  fusion->addOutput(t10); // full 3d bool
  fusion->addOutput(t22); // bcast last dim float
  fusion->addOutput(t27); // bcast last dim float
  fusion->addOutput(t18); // passthrough t0 float
  fusion->addOutput(t17); // passthrough t1 float
  fusion->addOutput(t34); // full 3d half
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_bcast5(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
  auto fp32_options =
      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);

  auto t2 = at::randn(input_shape, fp32_options);
  auto t5 = at::randn({input_shape[2]}, fp32_options);
  auto t3 = at::randn(input_shape, fp16_options);
  auto t0 = at::randn({input_shape[2]}, fp32_options);
  auto t1 = at::randn({input_shape[2]}, fp32_options);

  std::vector<c10::IValue> aten_inputs({t2, t5, t3, t0, t1});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - float x2, half x2, bool - t2, t16, t3, t34, t10
  // Inner-most dim only - float x5 - t5, t0, t1, t17, t18
  // Outer two dims only - float x2 - t22, t27
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) *
      // Per-iteration bytes, grouped so the iteration count scales every term.
      (t2.numel() * (2 * 4 + 2 * 2 + 1) + t5.numel() * 5 * 4 +
       input_shape[0] * input_shape[1] * 2 * 4));
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_vit_base_patch16_224_bcast5_NCHW,
    setup_vit_base_patch16_224_bcast5,
    NvFuserScheduler_TIMM_vit_base_patch16_224_bcast5,
    nullptr);

// Broadcast on both sides
NVFUSER_BENCHMARK_RUN(NvFuserScheduler_TIMM_vit_base_patch16_224_bcast5_NCHW)
    ->Args({64, 197, 768})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
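
// A reading of the ops below: a bias-add epilogue - cast the half input to
// float, broadcast a 1D bias over the outer two dimensions, add, and cast
// back to half.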
static void setup_vit_base_patch16_224_bcast_outer2(
    Fusion* fusion,
    void* null) {
  FusionGuard fg(fusion);

  auto t0 = makeContigTensor(3, DataType::Half);
  auto t2 = makeContigTensor(1, DataType::Float);

  fusion->addInput(t0);
  fusion->addInput(t2);

  auto t1 = castOp(DataType::Float, t0);
  auto t3 = set(t2);
  auto t4 = broadcast(t3, {true, true, false});
  auto t5 = add(t1, t4);
  auto t6 = castOp(DataType::Half, t5);
  auto t7 = castOp(DataType::Half, t3);

  fusion->addOutput(t6);
  fusion->addOutput(t7);
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_bcast_outer2(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
  auto fp32_options =
      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);

  auto t0 = at::randn(input_shape, fp16_options);
  auto t2 = at::randn({input_shape[2]}, fp32_options);

  std::vector<c10::IValue> aten_inputs({t0, t2});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - half x2 - t0, t6
  // Inner dimension only - float + half - t2, t7
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) *
      // Per-iteration bytes, grouped so the iteration count scales every term.
      (t0.numel() * (2 + 2) + input_shape[2] * (2 + 4)));
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast_outer2,
    setup_vit_base_patch16_224_bcast_outer2,
    NvFuserScheduler_TIMM_vit_base_patch16_224_bcast_outer2,
    nullptr);

NVFUSER_BENCHMARK_RUN(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast_outer2)
    ->Args({64, 197, 2304})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
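
// A reading of the ops below: a scaled softmax over the innermost dimension
// (max-subtract for stability, exp, sum, reciprocal) followed by dropout
// with keep-probability 1.0, as in an attention block.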
static void setup_vit_base_patch16_224_norm_inner3(Fusion* fusion, void* null) {
  FusionGuard fg(fusion);

  auto t0 = makeContigTensor(4, DataType::Half);
  fusion->addInput(t0);

  auto d13 = IrBuilder::create<Double>();
  fusion->addInput(d13);

  auto t1 = castOp(DataType::Float, t0);
  auto t2 = set(t1);
  auto t3 = mul(t2, d13);
  auto t4 = set(t3);
  auto t5 = max(t4, {3});
  auto t6 = broadcast(t5, {false, false, false, true});
  auto t7 = sub(t4, t6);
  auto t8 = exp(t7);
  auto t9 = sum(t8, {3});
  auto t10 = broadcast(t9, {false, false, false, true});
  auto t11 = reciprocal(t10);
  auto t12 = mul(t8, t11);
  auto t13 = rand_like(t12);
  auto d79 = sub(IrBuilder::create<Double>(1), IrBuilder::create<Double>(0));
  auto t14 = lt(t13, d79);
  auto t15 = castOp(DataType::Float, t14);
  auto b81 = eq(d79, IrBuilder::create<Double>(0));
  auto d82 = castOp(DataType::Double, b81);
  auto d83 = add(d82, d79);
  auto d85 = div(IrBuilder::create<Double>(1), d83);
  auto t16 = mul(t12, t15);
  auto t17 = mul(t16, d85);
  auto t18 = set(t17);
  auto t19 = castOp(DataType::Half, t18);

  fusion->addOutput(t19);
  fusion->addOutput(t14);
  fusion->addOutput(t12);
  fusion->addOutput(t4);
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_norm_inner3(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2),
      benchmark_state.range(2)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);

  auto t0 = at::randn(input_shape, fp16_options);

  std::vector<c10::IValue> aten_inputs({t0, 0.125});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - float x2, half x2, bool - t12, t4, t0, t19, t14
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) * t0.numel() * 13);
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_norm_inner3,
    setup_vit_base_patch16_224_norm_inner3,
    NvFuserScheduler_TIMM_vit_base_patch16_224_norm_inner3,
    nullptr);

// Norm inner dim
NVFUSER_BENCHMARK_RUN(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_norm_inner3)
    ->Args({64, 12, 197})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
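
// A reading of the ops below: bias-add followed by the erf formulation of
// GELU, 0.5 * x * (1 + erf(x / sqrt(2))), and dropout with
// keep-probability 1.0.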
static void setup_vit_base_patch16_224_bcast_outer6(
    Fusion* fusion,
    void* null) {
  FusionGuard fg(fusion);

  auto t0 = makeContigTensor(3, DataType::Half);
  auto t2 = makeContigTensor(1, DataType::Float);

  fusion->addInput(t0);
  fusion->addInput(t2);

  auto t1 = castOp(DataType::Float, t0);
  auto t3 = set(t2);
  auto t4 = broadcast(t3, {true, true, false});
  auto t5 = add(t1, t4);
  auto t6 = set(t5);
  auto t7 = mul(t6, IrBuilder::create<Double>(0.707106));
  auto t8 = erf(t7);
  auto t9 = add(IrBuilder::create<Double>(1), t8);
  auto t10 = mul(IrBuilder::create<Double>(0.5), t9);
  auto t11 = mul(t6, t10);
  auto t12 = rand_like(t11);
  auto d66 = sub(IrBuilder::create<Double>(1), IrBuilder::create<Double>(0));
  auto t13 = lt(t12, d66);
  auto t14 = castOp(DataType::Float, t13);
  auto t15 = mul(t11, t14);
  auto b68 = eq(d66, IrBuilder::create<Double>(0));
  auto d69 = castOp(DataType::Double, b68);
  auto d70 = add(d69, d66);
  auto d72 = div(IrBuilder::create<Double>(1), d70);
  auto t16 = mul(t15, d72);
  auto t17 = set(t16);
  auto t18 = castOp(DataType::Half, t17);
  auto t19 = castOp(DataType::Half, t3);

  fusion->addOutput(t18);
  fusion->addOutput(t13);
  fusion->addOutput(t6);
  fusion->addOutput(t19);
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_bcast_outer6(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
  auto fp32_options =
      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);

  auto t0 = at::randn(input_shape, fp16_options);
  auto t2 = at::randn({input_shape[2]}, fp32_options);

  std::vector<c10::IValue> aten_inputs({t0, t2});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - float, half x2, bool - t6, t0, t18, t13
  // Inner dimension only - float, half - t2, t19
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) *
      // Per-iteration bytes, grouped so the iteration count scales every term.
      (t0.numel() * (2 + 2 + 1 + 4) + input_shape[2] * (4 + 2)));
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast_outer6,
    setup_vit_base_patch16_224_bcast_outer6,
    NvFuserScheduler_TIMM_vit_base_patch16_224_bcast_outer6,
    nullptr);

NVFUSER_BENCHMARK_RUN(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast_outer6)
    // First size is the original; the rest are variations to check perf
    // reliability.
    ->Args({64, 197, 3 * 1024})
    ->Args({64, 197, 2 * 1024})
    ->Args({64, 197, 1024})
    ->Args({64, 197, 512})
    ->Args({3, 1024, 64 * 197})
    ->Args({2, 1024, 64 * 197})
    ->Args({1, 1024, 64 * 197})
    ->Args({2, 256, 64 * 197})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
// Reverse the broadcast dimensions to check for consistency in scheduling.
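// Same bias + GELU + dropout pattern as bcast_outer6, with the added
// operand broadcast along the innermost dimension instead.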
static void setup_vit_base_patch16_224_bcast_inner6(
    Fusion* fusion,
    void* null) {
  FusionGuard fg(fusion);

  auto t0 = makeContigTensor(3, DataType::Half);
  auto t2 = makeContigTensor(2, DataType::Float);

  fusion->addInput(t0);
  fusion->addInput(t2);

  auto t1 = castOp(DataType::Float, t0);
  auto t3 = set(t2);
  auto t4 = broadcast(t3, {false, false, true});
  auto t5 = add(t1, t4);
  auto t6 = set(t5);
  auto t7 = mul(t6, IrBuilder::create<Double>(0.707106));
  auto t8 = erf(t7);
  auto t9 = add(IrBuilder::create<Double>(1), t8);
  auto t10 = mul(IrBuilder::create<Double>(0.5), t9);
  auto t11 = mul(t6, t10);
  auto t12 = rand_like(t11);
  auto d66 = sub(IrBuilder::create<Double>(1), IrBuilder::create<Double>(0));
  auto t13 = lt(t12, d66);
  auto t14 = castOp(DataType::Float, t13);
  auto t15 = mul(t11, t14);
  auto b68 = eq(d66, IrBuilder::create<Double>(0));
  auto d69 = castOp(DataType::Double, b68);
  auto d70 = add(d69, d66);
  auto d72 = div(IrBuilder::create<Double>(1), d70);
  auto t16 = mul(t15, d72);
  auto t17 = set(t16);
  auto t18 = castOp(DataType::Half, t17);
  auto t19 = castOp(DataType::Half, t3);

  fusion->addOutput(t18);
  fusion->addOutput(t13);
  fusion->addOutput(t6);
  fusion->addOutput(t19);
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_bcast_inner6(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
  auto fp32_options =
      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);

  auto t0 = at::randn(input_shape, fp16_options);
  auto t2 = at::randn({input_shape[0], input_shape[1]}, fp32_options);

  std::vector<c10::IValue> aten_inputs({t0, t2});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - float, half x2, bool - t6, t0, t18, t13
  // Outer two dimensions only - float, half - t2, t19
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) *
      // Per-iteration bytes, grouped so the iteration count scales every term.
      (t0.numel() * (2 + 2 + 1 + 4) +
       input_shape[0] * input_shape[1] * (4 + 2)));
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast_inner6,
    setup_vit_base_patch16_224_bcast_inner6,
    NvFuserScheduler_TIMM_vit_base_patch16_224_bcast_inner6,
    nullptr);

NVFUSER_BENCHMARK_RUN(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_bcast_inner6)
    ->Args({64, 197, 3 * 1024})
    ->Args({64, 197, 2 * 1024})
    ->Args({64, 197, 1024})
    ->Args({64, 197, 512})
    ->Args({3, 1024, 64 * 197})
    ->Args({2, 1024, 64 * 197})
    ->Args({1, 1024, 64 * 197})
    ->Args({2, 256, 64 * 197})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
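
// A reading of the ops below: a LayerNorm backward - the normalized input
// is recomputed from the saved mean (t5) and inverse std (t6), grad_input
// is assembled from the usual reduction terms, and grad_weight / grad_bias
// come from sums over the outer two dimensions; the grad_input path is
// additionally masked (t0) and scaled (d33).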
static void setup_vit_base_patch16_224_LN_BWD(Fusion* fusion, void* null) {
  FusionGuard fg(fusion);

  auto t0 = makeContigTensor(3, DataType::Bool);
  fusion->addInput(t0);

  auto t1 = makeContigTensor(3, DataType::Half);
  fusion->addInput(t1);

  auto t2 = castOp(DataType::Float, t1);

  auto t3 = makeContigTensor(3, DataType::Half);
  fusion->addInput(t3);

  auto t4 = castOp(DataType::Float, t3);

  auto d35 = t3->axis(2)->extent();

  auto t5 = TensorViewBuilder()
                .shape({-1, -1, 1})
                .dtype(DataType::Float)
                .contiguity({true, true, false})
                .build();
  fusion->addInput(t5);

  auto t6 = TensorViewBuilder()
                .shape({-1, -1, 1})
                .dtype(DataType::Float)
                .contiguity({true, true, false})
                .build();
  fusion->addInput(t6);

  auto t7 = makeContigTensor(1, DataType::Half);
  fusion->addInput(t7);

  auto t8 = castOp(DataType::Float, t7);

  auto t9 = makeContigTensor(1, DataType::Half);
  fusion->addInput(t9);

  auto t11 = sub(t4, t5);
  auto t12 = mul(t11, t6);
  auto t13 = broadcast(t8, {true, true, false});
  auto t14 = mul(t2, t13);
  auto t15 = mul(d35, t14);
  auto t16 = sum(t14, {2});
  auto t17 = broadcast(t16, {false, false, true});
  auto t18 = mul(t14, t12);
  auto t19 = sum(t18, {2});
  auto t20 = broadcast(t19, {false, false, true});
  auto t40 = castOp(DataType::Half, t12);
  auto t41 = castOp(DataType::Float, t40);
  auto t42 = castOp(DataType::Half, t20);
  auto t43 = castOp(DataType::Float, t42);
  auto t21 = mul(t42, t43);
  auto t38 = castOp(DataType::Half, t15);
  auto t39 = castOp(DataType::Float, t38);
  auto t44 = castOp(DataType::Half, t17);
  auto t45 = castOp(DataType::Float, t44);
  auto t22 = sub(t39, t45);
  auto t23 = sub(t22, t21);
  auto d87 = reciprocal(d35);
  auto t24 = mul(d87, t6);
  auto t25 = mul(t24, t23);
  auto t26 = mul(t2, t41);
  auto t27 = sum(t26, {0, 1});
  auto t28 = sum(t2, {0, 1});
  auto t29 = castOp(DataType::Float, t0);
  auto t30 = mul(t25, t29);

  auto d33 = IrBuilder::create<Double>();
  fusion->addInput(d33);

  auto t31 = mul(t30, d33);
  auto t32 = sum(t31, {0, 1});
  auto t33 = castOp(DataType::Half, t32);
  auto t34 = castOp(DataType::Half, t31);
  auto t35 = castOp(DataType::Half, t25);
  auto t36 = castOp(DataType::Half, t27);
  auto t37 = castOp(DataType::Half, t28);

  fusion->addOutput(t33);
  fusion->addOutput(t34);
  fusion->addOutput(t35);
  fusion->addOutput(t36);
  fusion->addOutput(t37);
}
static void NvFuserScheduler_TIMM_vit_base_patch16_224_LN_BWD(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(2)};

  at::manual_seed(0);
  // auto bool_options =
  //     at::TensorOptions().dtype(at::kBool).device(at::kCUDA, 0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
  auto fp32_options =
      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);

  auto t0 = at::randn(input_shape, fp16_options).to(at::kBool);
  auto t1 = at::randn(input_shape, fp16_options);
  auto t3 = at::randn(input_shape, fp16_options);
  auto t5 = at::randn({input_shape[0], input_shape[1], 1}, fp32_options);
  auto t6 = at::randn({input_shape[0], input_shape[1], 1}, fp32_options);
  auto t7 = at::randn({input_shape[2]}, fp16_options);
  auto t9 = at::randn({input_shape[2]}, fp16_options);

  std::vector<c10::IValue> aten_inputs({t0, t1, t3, t5, t6, t7, t9, 1.0});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - bool, half x4 - t0, t1, t3, t34, t35
  // Outer two dimensions - float x2 - t5, t6
  // Inner dimension - half x5 - t7, t9, t33, t36, t37
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) *
      // Per-iteration bytes, grouped so the iteration count scales every term.
      ((t0.numel() * (4 * 2 + 1)) + (t5.numel() * 4 * 2) +
       (t7.numel() * 5 * 2)));
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_LN_BWD,
    setup_vit_base_patch16_224_LN_BWD,
    NvFuserScheduler_TIMM_vit_base_patch16_224_LN_BWD,
    nullptr);

NVFUSER_BENCHMARK_RUN(NvFuserScheduler_TIMM_NCHW_vit_base_patch16_224_LN_BWD)
    ->Args({128, 197, 768})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
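
// A reading of the ops below: a residual add, a where(x < 0, fill, x)
// selection against a zero-dim fill value, and a multiply; the where result
// is written out directly (t29) and the product is written out permuted
// ({0, 2, 3, 1}) as t30.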
static void nhwc_seresnet152d_transpose65(Fusion* fusion, void* null) {
  FusionGuard fg(fusion);

  auto t2 = makeContigTensor(4, DataType::Half);
  auto t5 = makeContigTensor(4, DataType::Half);
  auto t7 = makeContigTensor(4, DataType::Half);
  auto t9 = makeContigTensor(4, DataType::Half);
  auto t4 = makeConcreteTensor({}, DataType::Half);

  fusion->addInput(t2);
  fusion->addInput(t5);
  fusion->addInput(t7);
  fusion->addInput(t9);
  fusion->addInput(t4);

  auto d86 = IrBuilder::create<Double>(0);

  auto t3 = castOp(DataType::Float, t2);
  auto t6 = castOp(DataType::Float, t5);
  auto t8 = castOp(DataType::Float, t7);
  auto t10 = castOp(DataType::Float, t9);
  auto t11 = add(t8, t10);
  auto t12 = set(t11);
  auto t13 = set(t6);
  auto t14 = lt(t13, d86);
  auto t15 = broadcast(t4, {true, true, true, true});
  auto t16 = where(t14, t15, t12);
  auto t17 = set(t16);
  auto t29 = castOp(DataType::Half, t17);
  auto t18 = mul(t17, t3);
  auto t19 = permute(t18, {0, 2, 3, 1});
  auto t30 = castOp(DataType::Half, t19);

  fusion->addOutput(t29);
  fusion->addOutput(t30);
}
static void NvFuserScheduler_nhwc_seresnet152d_transpose65(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    void* null) {
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(2),
      benchmark_state.range(2),
      benchmark_state.range(1)};

  at::manual_seed(0);
  auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);

  auto t2 = at::randn(input_shape, fp16_options);
  auto t5 = at::randn(input_shape, fp16_options);
  auto t7 = at::randn(input_shape, fp16_options);
  auto t9 = at::randn(input_shape, fp16_options);
  // The fusion expects a zero-dim tensor; lacking a direct way to build one
  // here, reduce a 1D tensor to get one.
  auto t4 = at::randn({2}, fp16_options).sum();

  std::vector<c10::IValue> aten_inputs({t2, t5, t7, t9, t4});

  runBenchmarkIterations(benchmark_state, fusion_executor_cache, aten_inputs);

  // Full tensors - half x6 - t2, t5, t7, t9, t29, t30
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) * t2.numel() * 6 * 2);
}
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_TIMM_nhwc_seresnet152d_transpose65,
    nhwc_seresnet152d_transpose65,
    NvFuserScheduler_nhwc_seresnet152d_transpose65,
    nullptr);

// Norm inner dim Half version of vit_base_patch16_224_norm_inner3
NVFUSER_BENCHMARK_RUN(NvFuserScheduler_TIMM_nhwc_seresnet152d_transpose65)
    ->Args({128, 12, 197})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();