Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
Make PyTorch code-base clang-tidy compliant (#56892)
Summary:

This is an automatic change generated by the following script:

```python
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os

def get_compiled_files_list():
    import json
    with open("build/compile_commands.json") as f:
        data = json.load(f)
    files = [os.path.relpath(node['file']) for node in data]
    for idx, fname in enumerate(files):
        if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
            files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
    return files

def run_clang_tidy(fname):
    check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname, "-s"])
    changes = check_output(["git", "ls-files", "-m"])
    if len(changes) == 0:
        return
    check_call(["git", "commit", "--all", "-m", f"NOLINT stubs for {fname}"])

def main():
    git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
    compiled_files = get_compiled_files_list()
    for idx, fname in enumerate(git_files):
        if fname not in compiled_files:
            continue
        if fname.startswith("caffe2/contrib/aten/"):
            continue
        print(f"[{idx}/{len(git_files)}] Processing {fname}")
        run_clang_tidy(fname)

if __name__ == "__main__":
    main()
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892

Reviewed By: H-Huang

Differential Revision: D27991944

Pulled By: malfet

fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
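For readers unfamiliar with the mechanism this commit relies on: `// NOLINTNEXTLINE(<check-list>)` is clang-tidy's standard one-line suppression. It silences exactly the named checks on the line that follows, which is why the script above inserts a stub immediately before each offending line rather than disabling checks file-wide. A minimal sketch of what one of these stubs looks like in practice (the function and variable names here are illustrative, not taken from the PyTorch sources):

```cpp
#include <cstddef>
#include <cstdint>

int64_t sum_sizes(const int64_t* sizes, std::size_t n) {
  int64_t total = 0;
  // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
  for (int64_t i = 0; i < n; ++i) {  // signed/unsigned comparison, deliberately left as-is
    total += sizes[i];
  }
  return total;
}
```

Because the suppression is scoped to a single line, each warning stays visible in the source and can be cleaned up later; the script's per-file `NOLINT stubs for {fname}` commits appear designed with exactly that incremental follow-up in mind.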
Committed by: Facebook GitHub Bot
Parent: 5a10ee71d6
Commit: 4cb534f92e
@@ -87,6 +87,7 @@ static void warnFallback(const c10::FunctionSchema& schema, bool is_inplace) {
 // the operator, and then pop the results off the stack.
 void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
 const auto& schema = op.schema();
+// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,clang-diagnostic-unused-variable)
 const auto num_returns = schema.returns().size();
 warnFallback(schema, /*in_place*/true);
@@ -106,6 +107,7 @@ void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::j
 // For each BatchedTensor, also record what position of `arguments` they came from.
 SmallVector<Tensor,kVmapTransformStaticInputSize> batched_tensor_inputs;
 VmapDimVector batched_tensor_inputs_position;
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t idx = 0; idx < arguments.size(); ++idx) {
 const auto& ivalue = arguments[idx];
 if (!ivalue.isTensor()) {
@@ -177,6 +179,7 @@ void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::j
 auto index = computeIndex(linear_idx, batch_sizes);
 auto batched_tensor_inputs_pos_iter = batched_tensor_inputs_position.begin();
 auto input_physical_views_iter = input_physical_views.begin();
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t arg_idx = 0; arg_idx < num_arguments; ++arg_idx) {
 // We assume that torch::jit::Stack is backed by vector<IValue> for
 // simplicity. When that is not the case, this code should be updated.
@@ -270,6 +273,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
 // For each BatchedTensor, also record what position of `arguments` they came from.
 SmallVector<Tensor,kVmapTransformStaticInputSize> batched_tensor_inputs;
 VmapDimVector batched_tensor_inputs_position;
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t idx = 0; idx < arguments.size(); ++idx) {
 const auto& ivalue = arguments[idx];
 if (!ivalue.isTensor()) {
@@ -320,6 +324,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
 auto index = computeIndex(linear_idx, batch_sizes);
 auto batched_tensor_inputs_pos_iter = batched_tensor_inputs_position.begin();
 auto input_physical_views_iter = input_physical_views.begin();
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t arg_idx = 0; arg_idx < num_arguments; ++arg_idx) {
 // We assume that torch::jit::Stack is backed by vector<IValue> for
 // simplicity. When that is not the case, this code should be updated.
@@ -343,6 +348,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
 // Store the result into `output_shards`. See NOTE: [Output shards layout]
 // to learn about the details of how we store the shards.
 const auto returns = torch::jit::last(stack, num_returns);
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t return_idx = 0; return_idx < returns.size(); ++return_idx) {
 output_shards[num_batches * return_idx + linear_idx] = returns[return_idx].toTensor();
 }
@@ -352,6 +358,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
 // For each output Tensor, stack the shards of the tensor together to form a return
 torch::jit::drop(stack, num_arguments);
 auto output_shards_chunks = MatrixRef<Tensor>(output_shards, num_batches);
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t return_idx = 0; return_idx < num_returns; ++return_idx) {
 auto shards = output_shards_chunks[return_idx];
 auto flat_output = safeStack(shards);

@@ -23,6 +23,7 @@ BatchedTensorImpl::BatchedTensorImpl(Tensor value, BatchDims bdims)
 const auto value_sizes = value_.sizes();
 const auto value_strides = value_.strides();
 sizes_and_strides_.resize(public_dims);
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t dim = 0; dim < public_dims; dim++) {
 auto actual_dim = actualDim(dim, /*wrap_dim=*/false);
 sizes_and_strides_.size_at_unchecked(dim) = value_sizes.at(actual_dim);

@@ -146,16 +146,19 @@ Tensor expand_batching_rule(const Tensor& self, IntArrayRef size, bool implicit)
 auto size_physical = self_physical.getPhysicalShape(size);
 auto self_physical_dim = self_physical.tensor().dim();

+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 TORCH_CHECK(self_physical_dim <= size_physical.size(),
 "expand: the number of sizes provided (", /*logical*/size.size(), ") ",
 "must be greater or equal to the number of dimensions in the tensor (",
 /*logical dim*/self.dim(), ")");

+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 if (self_physical_dim == size_physical.size()) {
 auto result = self_physical.tensor().expand(size_physical, implicit);
 return self_physical.getPhysicalToLogicalMap().apply(result);
 }

+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 TORCH_INTERNAL_ASSERT(self_physical_dim < size_physical.size());
 // Here, we know we are expanding a (logical) tensor to a larger number
 // of dimensions. We have to be careful because we can't call expand directly

@@ -22,6 +22,7 @@ struct CPUGeneratorImplStateLegacy {
 int left; /* = 1; */
 int seeded; /* = 0; */
 uint64_t next;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
 uint64_t state[at::MERSENNE_STATE_N]; /* the array for the state vector */

 /********************************/
@@ -70,6 +71,7 @@ Generator createCPUGenerator(uint64_t seed_val) {
 * and return them as a 64 bit unsigned int
 */
 inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 return (static_cast<uint64_t>(hi) << 32) | lo;
 }
@@ -140,6 +142,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
 auto double_normal_sample = c10::optional<double>();

 // Construct the state of at::CPUGeneratorImpl based on input byte tensor size.
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 CPUGeneratorImplStateLegacy* legacy_pod;
 auto new_state_size = new_state.numel();
 if (new_state_size == size_legacy) {
@@ -154,6 +157,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
 // intermediate values.
 if (legacy_pod->normal_is_valid) {
 auto r = legacy_pod->normal_rho;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
 // we return the sin version of the normal sample when in caching mode
 double_normal_sample = c10::optional<double>(r * ::sin(theta));
@@ -183,6 +187,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
 // Note that CPUGeneratorImplStateLegacy stored a state array of 64 bit uints, whereas in our
 // redefined mt19937, we have changed to a state array of 32 bit uints. Hence, we are
 // doing a std::copy.
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 at::mt19937_data_pod rng_data;
 std::copy(std::begin(legacy_pod->state), std::end(legacy_pod->state), rng_data.state_.begin());
 rng_data.seed_ = legacy_pod->the_initial_seed;

@@ -92,13 +92,16 @@ void Context::setAllowTF32CuDNN(bool b) {
 allow_tf32_cudnn = b;
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
 static const char cublas_config_var_name[] = "CUBLAS_WORKSPACE_CONFIG";
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
 static const char* const cublas_deterministic_configs[] = { ":4096:8", ":16:8" };

 bool Context::checkCuBLASConfigDeterministic() {
 bool cublas_config_deterministic = true;
 // If using CUDA 10.2 or greater, need to make sure CuBLAS workspace config
 // is set to deterministic setting
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 if (hasCUDART() && (versionCUDART() >= 10020)) {
 char* workspace_config = std::getenv(cublas_config_var_name);
 cublas_config_deterministic = (workspace_config != nullptr) && (
@@ -240,6 +243,7 @@ Allocator* getCPUAllocator() {
 // means the allow_tf32 flags are overrided and tf32 is force disabled
 // override_allow_tf32_flag = false
 // means the original allow_tf32 flags are followed
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local bool override_allow_tf32_flag = false;

 NoTF32Guard::NoTF32Guard() {
@@ -273,6 +277,7 @@ void Context::setDefaultMobileCPUAllocator() {
 "Cannot set another allocator.");
 // Setting the priority high to make sure no other allocator gets used instead of this.
 prev_allocator_ptr_ = c10::GetCPUAllocator();
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100);
 }
@@ -281,6 +286,7 @@ void Context::unsetDefaultMobileCPUAllocator() {
 "setDefaultMobileCPUAllocator must have been called "
 "before unsetDefaultMobileCPUAllocator.");
 // Setting the priority high to make sure no other allocator gets used instead of this.
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 c10::SetCPUAllocator(prev_allocator_ptr_ , /*priority*/ 100);
 prev_allocator_ptr_ = nullptr;
 }

@@ -10,6 +10,7 @@ namespace at {
 DLDataType getDLDataType(const Tensor& t) {
 DLDataType dtype;
 dtype.lanes = 1;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 dtype.bits = t.element_size() * 8;
 switch (t.scalar_type()) {
 case ScalarType::Byte:
@@ -18,12 +19,14 @@ DLDataType getDLDataType(const Tensor& t) {
 case ScalarType::Char:
 dtype.code = DLDataTypeCode::kDLInt;
 break;
+// NOLINTNEXTLINE(bugprone-branch-clone)
 case ScalarType::Double:
 dtype.code = DLDataTypeCode::kDLFloat;
 break;
 case ScalarType::Float:
 dtype.code = DLDataTypeCode::kDLFloat;
 break;
+// NOLINTNEXTLINE(bugprone-branch-clone)
 case ScalarType::Int:
 dtype.code = DLDataTypeCode::kDLInt;
 break;
@@ -124,6 +127,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
 switch (dtype.code) {
 case DLDataTypeCode::kDLUInt:
 switch (dtype.bits) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 8:
 stype = ScalarType::Byte;
 break;
@@ -134,15 +138,19 @@ ScalarType toScalarType(const DLDataType& dtype) {
 break;
 case DLDataTypeCode::kDLInt:
 switch (dtype.bits) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 8:
 stype = ScalarType::Char;
 break;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 16:
 stype = ScalarType::Short;
 break;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 32:
 stype = ScalarType::Int;
 break;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 64:
 stype = ScalarType::Long;
 break;
@@ -153,12 +161,15 @@ ScalarType toScalarType(const DLDataType& dtype) {
 break;
 case DLDataTypeCode::kDLFloat:
 switch (dtype.bits) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 16:
 stype = ScalarType::Half;
 break;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 32:
 stype = ScalarType::Float;
 break;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 case 64:
 stype = ScalarType::Double;
 break;
@@ -173,6 +184,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
 return stype;
 }

+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct ATenDLMTensor {
 Tensor handle;
 DLManagedTensor tensor;
@@ -198,8 +210,10 @@ DLManagedTensor* toDLPack(const Tensor& src) {
 atDLMTensor->tensor.dl_tensor.ndim = src.dim();
 atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src);
 atDLMTensor->tensor.dl_tensor.shape =
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
 const_cast<int64_t*>(src.sizes().data());
 atDLMTensor->tensor.dl_tensor.strides =
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
 const_cast<int64_t*>(src.strides().data());
 atDLMTensor->tensor.dl_tensor.byte_offset = 0;
 return &(atDLMTensor->tensor);
@@ -209,6 +223,7 @@ Tensor fromDLPack(const DLManagedTensor* src) {
 Device device = getATenDevice(src->dl_tensor.ctx);
 ScalarType stype = toScalarType(src->dl_tensor.dtype);
 auto deleter = [src](void* self) {
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
 src->deleter(const_cast<DLManagedTensor*>(src));
 };
 if (!src->dl_tensor.strides) {

@@ -18,6 +18,7 @@ namespace {
 ScalarType infer_scalar_type(const Tensor & t) {
 return t.scalar_type();
 }
+// NOLINTNEXTLINE(clang-diagnostic-unused-function)
 ScalarType infer_scalar_type(const TensorList & tl) {
 TORCH_CHECK(tl.size() > 0, "expected a non-empty list of Tensors");
 return tl[0].scalar_type();

@@ -169,6 +169,7 @@ void propagate_names_except(const Tensor& result, const Tensor& src, IntArrayRef
 auto src_names = src.names();
 auto result_dim = result.dim();
 auto src_dim = src_names.size();
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 TORCH_INTERNAL_ASSERT(src_dim - excluded_idxs.size() == result_dim);

 // fast path
@@ -253,6 +254,7 @@ std::vector<Dimname> compute_diagonal_outnames(
 // tensors that we contract together. Usually other_dotted_dim is 0
 // and tensor_dotted_dim is the last dim of tensor, but there are some special
 // cases like einsum and tensordot where one can contract arbitrary dims.
+// NOLINTNEXTLINE(clang-diagnostic-unused-function)
 static std::vector<Dimname> compute_dot_product_outnames(
 DimnameList tensor_names,
 int64_t tensor_dotted_dim,
@@ -265,10 +267,12 @@ static std::vector<Dimname> compute_dot_product_outnames(
 std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
 int64_t index = 0;
 for (size_t j = 0; j < tensor_names.size(); ++j) {
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 if (j == tensor_dotted_dim) continue;
 outnames[index++] = tensor_names[j];
 }
 for (size_t j = 0; j < other_names.size(); ++j) {
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 if (j == other_dotted_dim) continue;
 outnames[index++] = other_names[j];
 }
@@ -294,6 +298,7 @@ static void check_feature_names_are_distinct(
 ". Please rename the input tensors with `Tensor.rename` to prevent this.");
 }

+// NOLINTNEXTLINE(clang-diagnostic-unused-function)
 static DimnameList batch_dims(DimnameList names) {
 if (names.size() <= 2) {
 return {};
@@ -301,6 +306,7 @@ static DimnameList batch_dims(DimnameList names) {
 return DimnameList(names.begin(), names.end() - 2);
 }

+// NOLINTNEXTLINE(clang-diagnostic-unused-function)
 static DimnameList feature_dims(DimnameList names) {
 if (names.size() <= 2) {
 return names;
@@ -308,6 +314,7 @@ static DimnameList feature_dims(DimnameList names) {
 return DimnameList(names.end() - 2, 2);
 }

+// NOLINTNEXTLINE(clang-diagnostic-unused-function)
 static bool are_distinct(DimnameList batch_dims, DimnameList feature_dims) {
 for (const auto& target : feature_dims) {
 if (target.isWildcard()) {
@@ -366,6 +373,7 @@ static std::vector<Dimname> compute_matmul_outnames(
 const auto result = working_names.toDimnameVec();

 check_feature_names_are_distinct(self_names, other_names, result);
+// NOLINTNEXTLINE(performance-no-automatic-move)
 return result;
 }

@@ -23,9 +23,11 @@ namespace at {
 namespace {
 // used with _set_in_parallel_region to mark master thread
 // as in parallel region while executing parallel primitives
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local bool in_parallel_region_ = false;

 // thread number (task_id) set by parallel primitive
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local size_t thread_num_ = 0;

 void _set_in_parallel_region(bool in_region) {
@@ -53,6 +55,7 @@ const int CONSUMED = -2;
 // - NOT_SET - pool not initialized, user value is not set
 // - positive value - pool not initialized, user value set
 // - CONSUMED - pool is initialized
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 std::atomic<int> num_intraop_threads{NOT_SET};

 int _num_pool_threads(int nthreads) {
@@ -123,10 +126,12 @@ void _parallel_run(
 const std::function<void(int64_t, int64_t, size_t)>& f) {
 at::internal::lazy_init_num_threads();

+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 size_t num_tasks, chunk_size;
 std::tie(num_tasks, chunk_size) =
 internal::calc_num_tasks_and_chunk_size(begin, end, grain_size);

+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct {
 std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
 std::exception_ptr eptr;
@@ -197,6 +202,7 @@ void set_num_threads(int nthreads) {
 int stored_nthreads = num_intraop_threads.load();
 if (stored_nthreads <= 0) {
 // plus one because of master thread
+// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
 stored_nthreads = _get_intraop_pool().size() + 1;
 }
 if (stored_nthreads != nthreads) {
@@ -224,6 +230,7 @@ int get_num_threads() {
 return intraop_default_num_threads();
 } else {
 TORCH_INTERNAL_ASSERT(nthreads == CONSUMED);
+// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
 return _get_intraop_pool().size() + 1;
 }
 #else

@@ -17,6 +17,7 @@ const int CONSUMED = -2;
 // (CONSUMED - thread pool is initialized)
 // or
 // NOT_SET -> CONSUMED
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 std::atomic<int> num_interop_threads{NOT_SET};

 // thread pool global instance is hidden,
@@ -45,6 +46,7 @@ std::shared_ptr<TaskThreadPoolBase> create_c10_threadpool(

 } // namespace

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 C10_REGISTER_CREATOR(ThreadPoolRegistry, C10, create_c10_threadpool);

 void set_num_interop_threads(int nthreads) {
@@ -79,8 +81,10 @@ void launch_no_thread_state(std::function<void()> fn) {
 } // namespace internal

 void launch(std::function<void()> func) {
+// NOLINTNEXTLINE(modernize-avoid-bind)
 internal::launch_no_thread_state(std::bind([](
 std::function<void()> f, ThreadLocalState thread_locals) {
+// NOLINTNEXTLINE(performance-move-const-arg)
 ThreadLocalStateGuard guard(std::move(thread_locals));
 f();
 },

@@ -4,6 +4,7 @@ namespace at {
 namespace sequence_number {

 namespace {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local uint64_t sequence_nr_ = 0;
 } // namespace

@@ -37,6 +37,7 @@ Tensor flatten_indices(const Tensor& indices, IntArrayRef full_size, bool force_
 }
 auto indices_mult_cpu = at::from_blob(
 indices_mult_cpu_vec.data(),
+// NOLINTNEXTLINE(bugprone-argument-comment)
 /*size=*/{sparse_dim, 1},
 indices.options().device(kCPU));
 // NB: must be blocking because this blob may be freed after this closure,
@@ -94,7 +95,9 @@ Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz) {
 if (nnz > 0) {
 auto csr_accessor = csr.accessor<int64_t, 1>();
 // Convert the sparse matrix to CSR format
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 at::parallel_for(0, nnz, 10000, [&](int64_t start, int64_t end) {
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 int64_t h, hp0, hp1;
 for (auto i = start; i < end; i++) {
 hp0 = indices[i];

@@ -104,29 +104,37 @@ TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
 // `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
 struct TORCH_API TensorIndex final {
 // Case 1: `at::indexing::None`
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {}

 // Case 2: "..." / `at::indexing::Ellipsis`
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(at::indexing::EllipsisIndexType) : type_(TensorIndexType::Ellipsis) {}
 TensorIndex(const char *str) : TensorIndex(at::indexing::Ellipsis) {
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TORCH_CHECK_VALUE(
 strcmp(str, "...") == 0,
 "Expected \"...\" to represent an ellipsis index, but got \"", str, "\"");
 }

 // Case 3: Integer value
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(int64_t integer) : integer_(integer), type_(TensorIndexType::Integer) {}
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(int integer) : TensorIndex((int64_t)integer) {}

 // Case 4: Boolean value
 template <class T,
 class = typename std::enable_if<std::is_same<bool, T>::value>::type >
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {}

 // Case 5: Slice represented in `at::indexing::Slice` form
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(Slice slice) : slice_(std::move(slice)), type_(TensorIndexType::Slice) {}

 // Case 6: Tensor value
+// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
 TensorIndex(Tensor tensor) : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {}

 inline bool is_none() const {

@@ -46,6 +46,7 @@ const TensorName& TensorName::unify(const TensorName& other, const char* op_name

 TensorNames::TensorNames(ArrayRef<Dimname> names) {
 names_.reserve(names.size());
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t idx = 0; idx < names.size(); ++idx) {
 names_.emplace_back(names, idx);
 }
@@ -61,6 +62,7 @@ TensorNames::TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end) {
 }

 TensorNames& TensorNames::unifyFromRightInplace(const TensorNames& other, const char* op_name) {
+// NOLINTNEXTLINE(bugprone-narrowing-conversions,clang-diagnostic-absolute-value,cppcoreguidelines-narrowing-conversions)
 size_t size_diff = std::labs(names_.size() - other.names_.size());

 if (names_.size() > other.names_.size()) {
@@ -73,6 +75,7 @@ TensorNames& TensorNames::unifyFromRightInplace(const TensorNames& other, const
 names_.begin(),
 other.names_.begin(),
 other.names_.begin() + size_diff);
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t idx = size_diff; idx < names_.size(); ++idx) {
 names_[idx] = names_[idx].unify(other.names_[idx], op_name);
 }

@@ -6,6 +6,7 @@
 #include <c10/util/accumulate.h>


+// NOLINTNEXTLINE(modernize-deprecated-headers)
 #include <stdarg.h>
 #include <cstdlib>
 #include <stdexcept>
@@ -14,6 +15,7 @@
 namespace at {

 int _crash_if_asan(int arg) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
 volatile char x[3];
 x[arg] = 0;
 return x[0];
@@ -36,6 +38,7 @@ Tensor empty_cpu(
 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

 bool pin_memory = pinned_memory_or_default(pin_memory_opt);
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 c10::Allocator* allocator;
 if (pin_memory) {
 allocator = detail::getCUDAHooks().getPinnedMemoryAllocator();

@@ -3,6 +3,7 @@
 namespace at {
 namespace impl {

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local int64_t VmapMode_current_vmap_level = 0;

 int64_t VmapMode::current_vmap_level() {

@@ -5,6 +5,7 @@ namespace at {

 // Checks if the batch dims in `bdims` appear at the front of the tensor.
 static bool areBdimsAtFrontInOrder(BatchDimsRef bdims) {
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t idx = 0; idx < bdims.size(); idx++) {
 if (bdims[idx].dim() != idx) {
 return false;
@@ -29,6 +30,7 @@ static Tensor permuteBatchDimsToFront(BatchedTensorImpl* batched) {
 for (const auto& bdim : bdims) {
 permutation[idx++] = bdim.dim();
 }
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t ptr = 0; idx < sizes.size(); ptr++) {
 if (is_bdim[ptr]) {
 continue;
@@ -137,8 +139,10 @@ static Tensor alignBatchDimsAtFront(
 auto physical_sizes = physical_tensor.sizes();

 auto tensor_example_dim = physical_sizes.size() - /*num_batch_dims*/tensor_levels.count();
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 TORCH_INTERNAL_ASSERT(tensor_example_dim <= requested_example_dim);

+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 if (tensor_levels == requested_levels && tensor_example_dim == requested_example_dim) {
 // Optimization: no need to do another view if the physical tensor is
 // already the correct shape
@@ -157,6 +161,7 @@ static Tensor alignBatchDimsAtFront(
 // align the bdims
 int64_t level = 0;
 int64_t tensor_dim = 0;
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
 for (int64_t bdim = 0; bdim < requested_levels.count(); bdim++) {
 // Determine the level of the bdim
 while (!requested_levels[level]) level++;
@@ -252,6 +257,7 @@ VmapPhysicalViewVec BroadcastingVmapTransform::logicalToPhysical(TensorList logi
 VmapPhysicalViewVec result;

 std::bitset<kVmapNumLevels> levels;
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 int64_t largest_logical_dim;
 std::tie(levels, largest_logical_dim) = getLevelsAndLargestLogicalDim(logical_tensors);
@@ -280,6 +286,7 @@ Tensor VmapPhysicalToLogicalMap::apply(const Tensor& physical_tensor) const {
 }

 void VmapPhysicalToLogicalMap::applyInplace(std::vector<Tensor>& physical_tensors) const {
+// NOLINTNEXTLINE(clang-diagnostic-sign-compare,modernize-loop-convert)
 for (int64_t idx = 0; idx < physical_tensors.size(); ++idx) {
 physical_tensors[idx] = apply(physical_tensors[idx]);
 }

@@ -39,12 +39,14 @@ namespace {
 // directly against incoming TensorImpl*s.
 using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
 using val_type = std::tuple<weakref_type, Tensor>;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local std::unordered_map<TensorImpl*, val_type> cached_casts;

 // nesting tracks the nesting depth of the Python-side context manager.
 // When the autocast context manager exits to a nesting level that's outside
 // any instance of autocast (which should occur at the end of each forward pass)
 // it calls clear_cache() to ensure cached Tensors don't leak outside the autocasting region.
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local int nesting = 0;
 }

@@ -12,6 +12,7 @@ static void quantize_per_channel_4d_contiguous(benchmark::State& state) {
 at::Tensor a = at::rand({batches, channels, height, width});
 at::Tensor scales = at::rand({channels});
 at::Tensor zero_points = at::randint(
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));

 at::Tensor qa;
@@ -32,6 +33,7 @@ static void quantize_per_channel_4d_channels_last(benchmark::State& state) {
 at::TensorOptions().memory_format(at::MemoryFormat::ChannelsLast));
 at::Tensor scales = at::rand({channels});
 at::Tensor zero_points = at::randint(
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));

 at::Tensor qa;
@@ -48,6 +50,7 @@ static void quantize_per_channel_2d(benchmark::State& state) {
 at::Tensor a = at::rand({channels, nelem});
 at::Tensor scales = at::rand({channels});
 at::Tensor zero_points = at::randint(
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));

 at::Tensor qa;
@@ -60,8 +63,11 @@ static void quantize_per_channel_2d(benchmark::State& state) {
 static void GenerateSizes4d(benchmark::internal::Benchmark* b) {
 b->ArgNames({"N", "C", "H", "W"});

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t n = 16; n < 256; n *= 2) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t c = 4; c < 256; c *= 2) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t hw = 4; hw < 256; hw *= 2) {
 b->Args({n, c, hw, hw});
 }
@@ -72,14 +78,19 @@ static void GenerateSizes4d(benchmark::internal::Benchmark* b) {
 static void GenerateSizes2d(benchmark::internal::Benchmark* b) {
 b->ArgNames({"C", "N"});

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t c = 4; c < 512; c *= 2) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t n = 4; n < 512; n *= 2) {
 b->Args({c, n});
 }
 }
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 BENCHMARK(quantize_per_channel_2d)->Apply(GenerateSizes2d);
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 BENCHMARK(quantize_per_channel_4d_contiguous)->Apply(GenerateSizes4d);
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 BENCHMARK(quantize_per_channel_4d_channels_last)->Apply(GenerateSizes4d);
 BENCHMARK_MAIN();

@@ -33,8 +33,10 @@ static void stateful_conv1d(benchmark::State& state) {
 )");

 std::vector<std::vector<torch::jit::IValue>> inputs;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (int i = 0; i < 10; ++i) {
 std::vector<torch::jit::IValue> input;
+// NOLINTNEXTLINE(modernize-use-emplace)
 input.push_back(torch::rand({batch_size, input_channels, width}));
 inputs.push_back(input);
 }
@@ -67,10 +69,15 @@ static void GenerateSizes(benchmark::internal::Benchmark* b) {
 "Width",
 "Optimized"});

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t input_channels = 32; input_channels < 256; input_channels *= 2) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t output_channels = 32; output_channels < 256; output_channels *= 2) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t kernel = 3; kernel < 8; ++kernel) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t batch_size = 1; batch_size < 5; ++batch_size) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t width = 32; width < 256; width *= 2) {
 b->Args({input_channels, output_channels, kernel, batch_size, width, true});
 b->Args({input_channels, output_channels, kernel, batch_size, width, false});
@@ -81,5 +88,6 @@ static void GenerateSizes(benchmark::internal::Benchmark* b) {
 }
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 BENCHMARK(stateful_conv1d)->Apply(GenerateSizes);
 BENCHMARK_MAIN();

@@ -17,7 +17,9 @@ static void tensor_add(benchmark::State& state) {
 static void GenerateSizes(benchmark::internal::Benchmark* b) {
 b->ArgNames({"N", "C"});

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t n = 8; n < 1024;) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 for (size_t c = 8; c < 1024;) {
 b->Args({n, c});
 c *= 2;
@@ -26,5 +28,6 @@ static void GenerateSizes(benchmark::internal::Benchmark* b) {
 }
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 BENCHMARK(tensor_add)->Apply(GenerateSizes);
 BENCHMARK_MAIN();

@@ -4,6 +4,7 @@

 namespace at {

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 static Symbol kWildcard = Symbol::dimname("*");

 std::ostream& operator<<(std::ostream& out, const Dimname& dimname) {
@@ -24,6 +25,7 @@ bool Dimname::isValidName(const std::string& name) {
 return false;
 }
 for (auto it = name.begin(); it != name.end(); ++it) {
+// NOLINTNEXTLINE(bugprone-branch-clone)
 if (std::isalpha(*it) || *it == '_') {
 continue;
 } else if (it != name.begin() && std::isdigit(*it)) {

@@ -60,7 +60,9 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
 break;
 }
 }
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 double expMin;
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 double expMax;
 if(offset == size) {
 expMin = 1;
@@ -91,9 +93,12 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
 }
 }
 double scale = 1;
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 int64_t sz;
 if(intMode) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 if(expMax > 9) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 sz = 11;
 stream << std::scientific << std::setprecision(4);
 } else {
@@ -102,20 +107,27 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
 }
 } else {
 if(expMax-expMin > 4) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 sz = 11;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 if(std::fabs(expMax) > 99 || std::fabs(expMin) > 99) {
 sz = sz + 1;
 }
 stream << std::scientific << std::setprecision(4);
 } else {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 if(expMax > 5 || expMax < 0) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 sz = 7;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 scale = std::pow(10, expMax-1);
 stream << std::fixed << std::setprecision(4);
 } else {
 if(expMax == 0) {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 sz = 7;
 } else {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 sz = expMax+6;
 }
 stream << std::fixed << std::setprecision(4);
@@ -138,7 +150,9 @@ static void printScale(std::ostream & stream, double scale) {
 }
 static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t linesize, int64_t indent)
 {
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 double scale;
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 int64_t sz;
 std::tie(scale, sz) = __printFormat(stream, self);
@@ -252,7 +266,9 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
 stream << "[ " << tensor_.toString() << "{}";
 } else if(tensor.ndimension() == 1) {
 if (tensor.numel() > 0) {
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 double scale;
+// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 int64_t sz;
 std::tie(scale, sz) = __printFormat(stream, tensor);
 if(scale != 1) {

[File diff suppressed because it is too large]

@@ -5,6 +5,7 @@

 namespace at {

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 thread_local bool NamesMode_enabled = true;

 bool NamesMode::is_enabled() {

@@ -96,6 +96,7 @@ public:
 const index_t* strides_)
 : TensorAccessorBase<T, 1, PtrTraits, index_t>(data_,sizes_,strides_) {}
 C10_HOST_DEVICE T & operator[](index_t i) {
+// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
 return this->data_[this->strides_[0]*i];
 }
 C10_HOST_DEVICE const T & operator[](index_t i) const {

@@ -1,6 +1,7 @@
 #include <gtest/gtest.h>
 #include <caffe2/core/tensor.h>

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(TensorImplTest, Caffe2Constructor) {
 caffe2::Tensor tensor(caffe2::CPU);
 ASSERT_EQ(tensor.strides()[0], 1);

@@ -3,6 +3,7 @@
 namespace at { namespace impl {

 namespace {
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 VariableHooksInterface* hooks = nullptr;
 }

@@ -5,6 +5,7 @@
 namespace at {
 namespace vitals {

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 APIVitals VitalsAPI;

 TorchVitalAttr& TorchVital::create(const std::string& attr) {

@@ -78,6 +78,7 @@ class TORCH_API Blob final : public c10::intrusive_ptr_target {
 // TODO: after we add Get<Tensor>(DeviceType)
 // and changed all the callsites, we can add
 // a static assert here to enforce T != Tensor
+// NOLINTNEXTLINE(clang-analyzer-core.uninitialized.UndefReturn)
 return *static_cast<const T*>(pointer_);
 }

@@ -24,6 +24,7 @@ namespace kernels {
 // The expectXXX() functions further below use these invariants
 // to check that calling a specific kernels works correctly.

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 optional<tuple<int64_t, int64_t>> called_with_args;
@@ -31,6 +32,7 @@ optional<tuple<int64_t, int64_t>> called_with_args;
 // take in a DispatchKeySet.
 // The value itself is meaningless for all of the tests that use kernels without a DispatchKeySet argument.
 // See Note [Plumbing Keys Through The Dispatcher] for details.
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 c10::DispatchKeySet CPU_TEST_SET = c10::DispatchKeySet(c10::DispatchKey::CPU);

 void boxed_func_with_return(const OperatorHandle& /*opHandle*/, Stack* stack) {
@@ -40,6 +42,7 @@ void boxed_func_with_return(const OperatorHandle& /*opHandle*/, Stack* stack) {
 called_with_args = tuple<int64_t, int64_t>(stack->at(0).toInt(), stack->at(1).toInt());

 stack->clear();
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 stack->push_back(5);
 }
@@ -68,6 +71,7 @@ void boxed_func_with_multi_return(const OperatorHandle& /*opHandle*/, Stack* sta
 struct unboxed_functor_with_return final : OperatorKernel {
 int64_t operator()(int64_t a, int64_t b) {
 called_with_args = tuple<int64_t, int64_t>(a, b);
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 return 5;
 }
 };
@@ -92,6 +96,7 @@ struct unboxed_functor_without_return_factory final {

 int64_t unboxed_function_with_return(int64_t a, int64_t b) {
 called_with_args = tuple<int64_t, int64_t>(a, b);
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 return 5;
 }
@@ -99,11 +104,14 @@ void unboxed_function_without_return(int64_t a, int64_t b) {
 called_with_args = tuple<int64_t, int64_t>(a, b);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 auto unboxed_lambda_with_return = [] (int64_t a, int64_t b) -> int64_t {
 called_with_args = tuple<int64_t, int64_t>(a, b);
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 return 5;
 };

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 auto unboxed_lambda_without_return = [] (int64_t a, int64_t b) -> void{
 called_with_args = tuple<int64_t, int64_t>(a, b);
 };
@@ -259,6 +267,7 @@ void expectOutOfPlaceMultiBoxedCallingWorks(const KernelFunction& func) {
 OperatorHandle dummy = makeDummyOperatorHandle();

 auto s1 = 1.0f;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 auto s2 = 2.0f;
 auto t1 = at::zeros({1});
 auto t2 = at::zeros({1});
@@ -359,6 +368,7 @@ void expectOutOfPlaceMultiUnboxedCallingWorks(const KernelFunction& func) {
 OperatorHandle dummy = makeDummyOperatorHandle();

 auto s1 = 1.0f;
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
 auto s2 = 2.0f;
 auto t1 = at::zeros({1});
 auto t2 = at::zeros({1});
@@ -384,16 +394,19 @@ void expectOutOfPlaceMultiUnboxedCallingWorks(const KernelFunction& func) {

 // functional, boxed calling

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_with_return>();
 kernels::expectBoxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withoutReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_without_return>();
 kernels::expectBoxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withMultiReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_with_multi_return>();
 kernels::expectBoxedCallingWithMultiReturnWorks(func);
@@ -401,16 +414,19 @@ TEST(KernelFunctionTest, givenBoxedFunction_withMultiReturn_whenCallingBoxed_the

 // in/out, boxed calling

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withInPlaceSignature_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_for_inplace_op>();
 kernels::expectInPlaceBoxedCallingWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withOutOfPlaceSignature_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_for_outofplace_op>();
 kernels::expectOutOfPlaceBoxedCallingWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withOutOfPlaceMultiSignature_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_for_outofplace_multi_op>();
 kernels::expectOutOfPlaceMultiBoxedCallingWorks(func);
@@ -418,16 +434,19 @@ TEST(KernelFunctionTest, givenBoxedFunction_withOutOfPlaceMultiSignature_whenCal

 // functional, unboxed calling

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_with_return>();
 kernels::expectUnboxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withoutReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_without_return>();
 kernels::expectUnboxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withMultiReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_with_multi_return>();
 kernels::expectUnboxedCallingWithMultiReturnWorks(func);
@@ -435,16 +454,19 @@ TEST(KernelFunctionTest, givenBoxedFunction_withMultiReturn_whenCallingUnboxed_t

 // in/out, unboxed calling

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withInPlaceSignature_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_for_inplace_op>();
 kernels::expectInPlaceUnboxedCallingWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withOutOfPlaceSignature_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_for_outofplace_op>();
 kernels::expectOutOfPlaceUnboxedCallingWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenBoxedFunction_withOutOfPlaceMultiSignature_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromBoxedFunction<&kernels::boxed_func_for_outofplace_multi_op>();
 kernels::expectOutOfPlaceMultiUnboxedCallingWorks(func);
@@ -452,81 +474,97 @@ TEST(KernelFunctionTest, givenBoxedFunction_withOutOfPlaceMultiSignature_whenCal

 // functors etc.

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunctor_withReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
 kernels::expectBoxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunctor_withoutReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
 kernels::expectBoxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunctor_withReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
 kernels::expectUnboxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunctor_withoutReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
 kernels::expectUnboxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunction_withReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernels::unboxed_function_with_return));
 kernels::expectBoxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunction_withoutReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernels::unboxed_function_without_return));
 kernels::expectBoxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunction_withReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernels::unboxed_function_with_return));
 kernels::expectUnboxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedFunction_withoutReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernels::unboxed_function_without_return));
 kernels::expectUnboxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedRuntimeFunction_withReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&kernels::unboxed_function_with_return);
 kernels::expectBoxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedRuntimeFunction_withoutReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&kernels::unboxed_function_without_return);
 kernels::expectBoxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedRuntimeFunction_withReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&kernels::unboxed_function_with_return);
 kernels::expectUnboxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedRuntimeFunction_withoutReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&kernels::unboxed_function_without_return);
 kernels::expectUnboxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedLambda_withReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedLambda(kernels::unboxed_lambda_with_return);
 kernels::expectBoxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedLambda_withoutReturn_whenCallingBoxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedLambda(kernels::unboxed_lambda_without_return);
 kernels::expectBoxedCallingWithoutReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedLambda_withReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedLambda(kernels::unboxed_lambda_with_return);
 kernels::expectUnboxedCallingWithReturnWorks(func);
 }

+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(KernelFunctionTest, givenUnboxedLambda_withoutReturn_whenCallingUnboxed_thenWorks) {
 KernelFunction func = KernelFunction::makeFromUnboxedLambda(kernels::unboxed_lambda_without_return);
 kernels::expectUnboxedCallingWithoutReturnWorks(func);
@@ -50,6 +50,7 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@@ -61,21 +62,25 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel);
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernel_whenRegisteredInConstructor_thenCanBeCalled) {
auto registrar = RegisterOperators("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel);
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInOneRegistrar_thenCallsRightKernel) {
auto registrar = RegisterOperators()
.op("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel)
@@ -83,12 +88,14 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMultipleOperatorsA
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInMultipleRegistrars_thenCallsRightKernel) {
auto registrar1 = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel);
auto registrar2 = RegisterOperators().op("_test::error(Tensor dummy, int input) -> int", &errorKernel);
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernel_whenRegistrationRunsOutOfScope_thenCannotBeCalledAnymore) {
{
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel);
@@ -100,12 +107,14 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernel_whenRegistr
expectDoesntFindOperator("_test::my_op");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool was_called = false;

void kernelWithoutOutput(const Tensor&) {
was_called = true;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::no_return(Tensor dummy) -> ()", &kernelWithoutOutput);

@@ -122,6 +131,7 @@ std::tuple<> kernelWithZeroOutputs(const Tensor&) {
return std::make_tuple();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithZeroOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::zero_outputs(Tensor dummy) -> ()", &kernelWithZeroOutputs);

@@ -137,6 +147,7 @@ int64_t kernelWithIntOutput(Tensor, int64_t a, int64_t b) {
return a + b;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_output(Tensor dummy, int a, int b) -> int", &kernelWithIntOutput);
@@ -144,6 +155,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntOutpu
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@@ -153,6 +165,7 @@ Tensor kernelWithTensorOutput(const Tensor& input) {
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::returning_tensor(Tensor input) -> Tensor", &kernelWithTensorOutput);
@@ -173,6 +186,7 @@ std::vector<Tensor> kernelWithTensorListOutput(const Tensor& input1, const Tenso
return {input1, input2, input3};
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor input1, Tensor input2, Tensor input3) -> Tensor[]", &kernelWithTensorListOutput);
@@ -192,6 +206,7 @@ std::vector<int64_t> kernelWithIntListOutput(const Tensor&, int64_t input1, int6
return {input1, input2, input3};
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor dummy, int input1, int input2, int input3) -> int[]", &kernelWithIntListOutput);
@@ -199,6 +214,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListO
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@@ -213,6 +229,7 @@ std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<st
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(c10::in_place, 0),
@@ -220,6 +237,7 @@ std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<st
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", &kernelWithMultipleOutputs);
@@ -248,6 +266,7 @@ Tensor kernelWithTensorInputByReferenceWithOutput(const Tensor& input1) {
Tensor kernelWithTensorInputByValueWithOutput(Tensor input1) {
return input1;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorInputByReference_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", &kernelWithTensorInputByReferenceWithOutput);
@@ -264,6 +283,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorIn
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorInputByValue_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", &kernelWithTensorInputByValueWithOutput);
@@ -280,6 +300,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorIn
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
Tensor captured_input;

void kernelWithTensorInputByReferenceWithoutOutput(const Tensor& input1) {
@@ -290,6 +311,7 @@ void kernelWithTensorInputByValueWithoutOutput(Tensor input1) {
captured_input = input1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorInputByReference_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", &kernelWithTensorInputByReferenceWithoutOutput);
@@ -306,6 +328,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorIn
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorInputByValue_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", &kernelWithTensorInputByValueWithoutOutput);
@@ -322,12 +345,14 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorIn
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_int_input = 0;

void kernelWithIntInputWithoutOutput(Tensor, int64_t input1) {
captured_int_input = input1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> ()", &kernelWithIntInputWithoutOutput);
@@ -345,6 +370,7 @@ int64_t kernelWithIntInputWithOutput(Tensor, int64_t input1) {
return input1 + 1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> int", &kernelWithIntInputWithOutput);
@@ -357,12 +383,14 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntInput
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_input_list_size = 0;

void kernelWithIntListInputWithoutOutput(Tensor, const std::vector<int64_t>& input1) {
captured_input_list_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> ()", &kernelWithIntListInputWithoutOutput);
@@ -371,6 +399,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListI
ASSERT_TRUE(op.has_value());

captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@@ -380,6 +409,7 @@ int64_t kernelWithIntListInputWithOutput(Tensor, const std::vector<int64_t>& inp
return input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> int", &kernelWithIntListInputWithOutput);
@@ -387,6 +417,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListI
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
@@ -396,6 +427,7 @@ void kernelWithTensorListInputWithoutOutput(const std::vector<Tensor>& input1) {
captured_input_list_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", &kernelWithTensorListInputWithoutOutput);
@@ -413,6 +445,7 @@ int64_t kernelWithTensorListInputWithOutput(const std::vector<Tensor>& input1) {
return input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", &kernelWithTensorListInputWithOutput);
@@ -429,6 +462,7 @@ void kernelWithLegacyTensorVectorInputWithoutOutput(const std::vector<Tensor>& i
captured_input_list_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorVectorInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", &kernelWithLegacyTensorVectorInputWithoutOutput);
@@ -446,6 +480,7 @@ int64_t kernelWithLegacyTensorVectorInputWithOutput(const std::vector<Tensor>& i
return input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorVectorInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", &kernelWithLegacyTensorVectorInputWithOutput);
@@ -462,6 +497,7 @@ void kernelWithLegacyTensorListInputWithoutOutput(std::vector<Tensor> input1) {
captured_input_list_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", &kernelWithLegacyTensorListInputWithoutOutput);
@@ -479,6 +515,7 @@ int64_t kernelWithLegacyTensorListInputWithOutput(std::vector<Tensor> input1) {
return input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", &kernelWithLegacyTensorListInputWithOutput);
@@ -495,6 +532,7 @@ std::vector<std::string> kernelWithStringListOutput(std::vector<std::string> inp
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithStringListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::stringlist_output(str[] input) -> str[]", &kernelWithStringListOutput);
@@ -512,12 +550,14 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithStringLi
EXPECT_EQ("value2", output.get(1).toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int captured_dict_size = 0;

void kernelWithDictInputWithoutOutput(Dict<string, Tensor> input1) {
captured_dict_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithDictInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, Tensor) input) -> ()", &kernelWithDictInputWithoutOutput);
@@ -538,6 +578,7 @@ string kernelWithDictInputWithOutput(Dict<string, string> input1) {
return input1.at("key2");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithDictInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", &kernelWithDictInputWithOutput);
@@ -557,6 +598,7 @@ Dict<string, string> kernelWithDictOutput(Dict<string, string> input) {
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithDictOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", &kernelWithDictOutput);
@@ -580,6 +622,7 @@ void kernelWithUnorderedMapInputWithoutOutput(std::unordered_map<string, Tensor>
captured_dict_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithUnorderedMapInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, Tensor) input) -> ()", &kernelWithUnorderedMapInputWithoutOutput);
@@ -600,6 +643,7 @@ string kernelWithUnorderedMapInputWithOutput(std::unordered_map<string, string>
return input1.at("key2");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithUnorderedMapInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", &kernelWithUnorderedMapInputWithOutput);
@@ -619,6 +663,7 @@ std::unordered_map<string, string> kernelWithUnorderedMapOutput(std::unordered_m
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithUnorderedMapOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", &kernelWithUnorderedMapOutput);
@@ -642,6 +687,7 @@ std::unordered_map<string, std::vector<int64_t>> kernelWithMapOfIntList(std::uno
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfList_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, int[]) input) -> Dict(str, int[])", &kernelWithMapOfIntList);
@@ -650,7 +696,9 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfLis
ASSERT_TRUE(op.has_value());

c10::Dict<string, c10::List<int64_t>> dict;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key1", c10::List<int64_t>({10, 20}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key2", c10::List<int64_t>({30, 40}));
auto outputs = callOp(*op, dict);
EXPECT_EQ(1, outputs.size());
@@ -669,6 +717,7 @@ std::unordered_map<string, std::vector<std::unordered_map<int64_t, string>>> ker
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfListOfMap_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, Dict(int,str)[]) input) -> Dict(str, Dict(int,str)[])", &kernelWithMapOfListOfMap);
@@ -678,11 +727,15 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfLis

c10::Dict<string, c10::List<c10::Dict<int64_t, string>>> dict;
c10::Dict<int64_t, string> dict1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(10, "10");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(20, "20");
dict.insert("key1", c10::List<c10::Dict<int64_t, string>>({dict1}));
c10::Dict<int64_t, string> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(30, "30");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(40, "40");
dict.insert("key2", c10::List<c10::Dict<int64_t, string>>({dict2}));
auto outputs = callOp(*op, dict);
@@ -703,6 +756,7 @@ std::vector<std::unordered_map<string, int64_t>> kernelWithListOfMap(std::vector
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMap_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Dict(str, int)[] input) -> Dict(str, int)[]", &kernelWithListOfMap);
@@ -734,6 +788,7 @@ std::vector<std::unordered_map<string, std::vector<int64_t>>> kernelWithListOfMa
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMapOfIntList_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Dict(str, int[])[] input) -> Dict(str, int[])[]", &kernelWithListOfMapOfIntList);
@@ -745,7 +800,9 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMa
dict1.insert("1", c10::List<int64_t>({1, 2}));
dict1.insert("3", c10::List<int64_t>({3, 4}));
c10::Dict<string, c10::List<int64_t>> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("5", c10::List<int64_t>({5, 6}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("7", c10::List<int64_t>({7, 8}));
c10::List<c10::Dict<string, c10::List<int64_t>>> list({ dict1, dict2 });
auto outputs = callOp(*op, list);
@@ -768,12 +825,14 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMa
EXPECT_EQ(8, output.get(1).toGenericDict().at("7").toIntVector()[1]);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called = false;

void kernelWithoutInputs() {
called = true;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@@ -793,6 +852,7 @@ int64_t kernelWithoutTensorInputs(int64_t arg) {
return arg + 1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenFallbackKernelWithoutTensorArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@@ -808,8 +868,11 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenFallbackKernelWith
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<Tensor> called_arg2 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<int64_t> called_arg3 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<std::string> called_arg4 = c10::nullopt;

void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
@@ -819,6 +882,7 @@ void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& a
called_arg4 = arg4;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", &kernelWithOptInputWithoutOutput);
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@@ -854,6 +918,7 @@ c10::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optio
return arg2;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?", &kernelWithOptInputWithOutput);
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@@ -888,6 +953,7 @@ kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional<Tensor>&
return std::make_tuple(arg2, arg3, arg4);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", &kernelWithOptInputWithMultipleOutputs);
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@@ -920,6 +986,7 @@ void expectCallsConcatUnboxed(DispatchKey dispatch_key) {
EXPECT_EQ("123", result);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", &concatKernel);
expectCallsConcatUnboxed(DispatchKey::CPU);
@@ -929,6 +996,7 @@ std::tuple<int64_t, Tensor> kernelForSchemaInference(Tensor arg1, int64_t arg2,
return {};
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernel_whenRegisteredWithoutSpecifyingSchema_thenInfersSchema) {
auto registrar = RegisterOperators()
.op("_test::no_schema_specified", &kernelForSchemaInference);
@@ -947,6 +1015,7 @@ template<class... Args> struct kernel_func<void, Args...> final {
static void func(Args...) {}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_withDifferentNumArguments_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@@ -983,6 +1052,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_w
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_withDifferentArgumentType_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@@ -1002,6 +1072,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_w
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_withDifferentNumReturns_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@@ -1061,6 +1132,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_w
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenMismatchedKernel_withDifferentReturnTypes_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
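The hunks above come from the legacy function-based kernel tests: a plain function pointer is registered against a full schema string, then resolved via `c10::Dispatcher::singleton().findSchema(...)` and invoked boxed through the `callOp` test helper, which pushes the arguments onto a stack and returns the popped results. A hedged sketch of that registration pattern, assuming a libtorch build; the operator name `my_ns::add_one` is made up for illustration:

```cpp
#include <ATen/core/op_registration/op_registration.h>

int64_t add_one(at::Tensor /*dummy*/, int64_t x) { return x + 1; }

// Legacy API: the schema is spelled out in full and the kernel is a raw
// function pointer. Registration is RAII: the operator disappears when
// `registry` is destroyed, which is what the RunsOutOfScope test checks.
static auto registry = c10::RegisterOperators().op(
    "my_ns::add_one(Tensor dummy, int input) -> int", &add_one);
```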
@@ -39,6 +39,7 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@@ -50,16 +51,19 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<decltype(incrementKernel), &incrementKernel>(DispatchKey::CPU));
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegisteredWithTorchLibraryAndTorchFn_thenCanBeCalled) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("my_op(Tensor dummy, int input) -> int");
@@ -67,12 +71,14 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegisteredWit
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenCatchAllKernel_whenRegisteredWithTorchLibraryAndTorchFn_thenCanBeCalled) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("my_op(Tensor dummy, int input) -> int", TORCH_FN(incrementKernel));
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInOneRegistrar_thenCallsRightKernel) {
auto registrar = RegisterOperators()
.op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<decltype(incrementKernel), &incrementKernel>(DispatchKey::CPU)
@@ -82,6 +88,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMultipleOperatorsAndKern
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInMultipleRegistrars_thenCallsRightKernel) {
auto registrar1 = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<decltype(incrementKernel), &incrementKernel>(DispatchKey::CPU)
.kernel<decltype(errorKernel), &errorKernel>(DispatchKey::CUDA));
@@ -91,6 +98,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMultipleOperatorsAndKern
}


// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegistrationRunsOutOfScope_thenCannotBeCalledAnymore) {
{
auto m = MAKE_TORCH_LIBRARY(_test);
@@ -115,12 +123,14 @@ TEST(NewOperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegistrati
expectDoesntFindOperator("_test::my_op");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool was_called = false;

void kernelWithoutOutput(const Tensor&) {
was_called = true;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::no_return(Tensor dummy) -> ()", RegisterOperators::options().kernel<decltype(kernelWithoutOutput), &kernelWithoutOutput>(DispatchKey::CPU));

@@ -137,6 +147,7 @@ std::tuple<> kernelWithZeroOutputs(const Tensor&) {
return std::make_tuple();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithZeroOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::zero_outputs(Tensor dummy) -> ()", RegisterOperators::options().kernel<decltype(kernelWithZeroOutputs), &kernelWithZeroOutputs>(DispatchKey::CPU));

@@ -152,6 +163,7 @@ int64_t kernelWithIntOutput(Tensor, int64_t a, int64_t b) {
return a + b;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_output(Tensor dummy, int a, int b) -> int", RegisterOperators::options().kernel<decltype(kernelWithIntOutput), &kernelWithIntOutput>(DispatchKey::CPU));
@@ -159,6 +171,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntOutput_when
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@@ -168,6 +181,7 @@ Tensor kernelWithTensorOutput(const Tensor& input) {
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::returning_tensor(Tensor input) -> Tensor", RegisterOperators::options().kernel<decltype(kernelWithTensorOutput), &kernelWithTensorOutput>(DispatchKey::CPU)
@@ -189,6 +203,7 @@ c10::List<Tensor> kernelWithTensorListOutput(const Tensor& input1, const Tensor&
return c10::List<Tensor>({input1, input2, input3});
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor input1, Tensor input2, Tensor input3) -> Tensor[]", RegisterOperators::options().kernel<decltype(kernelWithTensorListOutput), &kernelWithTensorListOutput>(DispatchKey::CUDA));
@@ -208,6 +223,7 @@ c10::List<int64_t> kernelWithIntListOutput(const Tensor&, int64_t input1, int64_
return c10::List<int64_t>({input1, input2, input3});
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor dummy, int input1, int input2, int input3) -> int[]", RegisterOperators::options().kernel<decltype(kernelWithIntListOutput), &kernelWithIntListOutput>(DispatchKey::CPU));
@@ -215,6 +231,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListOutput_
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@@ -229,6 +246,7 @@ std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<stri
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(c10::in_place, 0),
@@ -236,6 +254,7 @@ std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<stri
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", RegisterOperators::options().kernel<decltype(kernelWithMultipleOutputs), &kernelWithMultipleOutputs>(DispatchKey::CPU));
@@ -265,6 +284,7 @@ Tensor kernelWithTensorInputByValueWithOutput(Tensor input1) {
return input1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByReference_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", RegisterOperators::options().kernel<decltype(kernelWithTensorInputByReferenceWithOutput), &kernelWithTensorInputByReferenceWithOutput>(DispatchKey::CPU)
@@ -282,6 +302,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByR
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByValue_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", RegisterOperators::options().kernel<decltype(kernelWithTensorInputByValueWithOutput), &kernelWithTensorInputByValueWithOutput>(DispatchKey::CPU)
@@ -299,6 +320,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByV
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
Tensor captured_input;

void kernelWithTensorInputByReferenceWithoutOutput(const Tensor& input1) {
@@ -309,6 +331,7 @@ void kernelWithTensorInputByValueWithoutOutput(Tensor input1) {
captured_input = input1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByReference_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", RegisterOperators::options().kernel<decltype(kernelWithTensorInputByReferenceWithoutOutput), &kernelWithTensorInputByReferenceWithoutOutput>(DispatchKey::CPU)
@@ -326,6 +349,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByR
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByValue_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", RegisterOperators::options().kernel<decltype(kernelWithTensorInputByValueWithoutOutput), &kernelWithTensorInputByValueWithoutOutput>(DispatchKey::CPU)
@@ -343,12 +367,14 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorInputByV
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_int_input = 0;

void kernelWithIntInputWithoutOutput(Tensor, int64_t input1) {
captured_int_input = input1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> ()", RegisterOperators::options().kernel<decltype(kernelWithIntInputWithoutOutput), &kernelWithIntInputWithoutOutput>(DispatchKey::CPU));
@@ -366,6 +392,7 @@ int64_t kernelWithIntInputWithOutput(Tensor, int64_t input1) {
return input1 + 1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<decltype(kernelWithIntInputWithOutput), &kernelWithIntInputWithOutput>(DispatchKey::CPU));
@@ -378,12 +405,14 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntInput_withO
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_input_list_size = 0;

void kernelWithIntListInputWithoutOutput(Tensor, const c10::List<int64_t>& input1) {
captured_input_list_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> ()", RegisterOperators::options().kernel<decltype(kernelWithIntListInputWithoutOutput), &kernelWithIntListInputWithoutOutput>(DispatchKey::CPU));
@@ -392,6 +421,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_w
ASSERT_TRUE(op.has_value());

captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@@ -401,6 +431,7 @@ int64_t kernelWithIntListInputWithOutput(Tensor, const c10::List<int64_t>& input
return input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> int", RegisterOperators::options().kernel<decltype(kernelWithIntListInputWithOutput), &kernelWithIntListInputWithOutput>(DispatchKey::CPU));
@@ -408,6 +439,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_w
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
@@ -417,6 +449,7 @@ void kernelWithTensorListInputWithoutOutput(const c10::List<Tensor>& input1) {
captured_input_list_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", RegisterOperators::options().kernel<decltype(kernelWithTensorListInputWithoutOutput), &kernelWithTensorListInputWithoutOutput>(DispatchKey::CPU));
@@ -434,6 +467,7 @@ int64_t kernelWithTensorListInputWithOutput(const c10::List<Tensor>& input1) {
return input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", RegisterOperators::options().kernel<decltype(kernelWithTensorListInputWithOutput), &kernelWithTensorListInputWithOutput>(DispatchKey::CPU));
@@ -446,12 +480,14 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorListInpu
EXPECT_EQ(2, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int captured_dict_size = 0;

void kernelWithDictInputWithoutOutput(Dict<string, Tensor> input1) {
captured_dict_size = input1.size();
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithDictInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, Tensor) input) -> ()", RegisterOperators::options().catchAllKernel<decltype(kernelWithDictInputWithoutOutput), &kernelWithDictInputWithoutOutput>());
@@ -472,6 +508,7 @@ string kernelWithDictInputWithOutput(Dict<string, string> input1) {
return input1.at("key2");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithDictInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", RegisterOperators::options().catchAllKernel<decltype(kernelWithDictInputWithOutput), &kernelWithDictInputWithOutput>());
@@ -491,6 +528,7 @@ Dict<string, string> kernelWithDictOutput(Dict<string, string> input) {
return input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithDictOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", RegisterOperators::options().catchAllKernel<decltype(kernelWithDictOutput), &kernelWithDictOutput>());
@@ -510,12 +548,14 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithDictOutput_whe
EXPECT_EQ("value2", output.at("key2"));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called = false;

void kernelWithoutInputs() {
called = true;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@@ -535,6 +575,7 @@ int64_t kernelWithoutTensorInputs(int64_t arg) {
return arg + 1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenFallbackKernelWithoutTensorArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@@ -550,8 +591,11 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenFallbackKernelWithoutTen
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<Tensor> called_arg2 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<int64_t> called_arg3 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<std::string> called_arg4 = c10::nullopt;

void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
@@ -561,6 +605,7 @@ void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& a
called_arg4 = arg4;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", RegisterOperators::options().kernel<decltype(kernelWithOptInputWithoutOutput), &kernelWithOptInputWithoutOutput>(DispatchKey::CPU));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@@ -596,6 +641,7 @@ c10::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optio
return arg2;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?", RegisterOperators::options().kernel<decltype(kernelWithOptInputWithOutput), &kernelWithOptInputWithOutput>(DispatchKey::CPU));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@@ -630,6 +676,7 @@ kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional<Tensor>&
return std::make_tuple(arg2, arg3, arg4);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", RegisterOperators::options().kernel<decltype(kernelWithOptInputWithMultipleOutputs), &kernelWithOptInputWithMultipleOutputs>(DispatchKey::CPU));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@@ -674,6 +721,7 @@ void expectCannotCallConcatBoxed(DispatchKey dispatch_key) {
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", RegisterOperators::options().kernel<decltype(concatKernel), &concatKernel>(DispatchKey::CPU));
expectCallsConcatUnboxed(DispatchKey::CPU);
@@ -683,6 +731,7 @@ std::tuple<int64_t, Tensor> kernelForSchemaInference(Tensor arg1, int64_t arg2,
return {};
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegisteredWithoutSpecifyingSchema_thenInfersSchema) {
auto registrar = RegisterOperators()
.op("_test::no_schema_specified", RegisterOperators::options().catchAllKernel<decltype(kernelForSchemaInference), &kernelForSchemaInference>());
@@ -701,6 +750,7 @@ template<class... Args> struct kernel_func<void, Args...> final {
static void func(Args...) {}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDifferentNumArguments_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@@ -737,6 +787,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDif
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDifferentArgumentType_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@@ -756,6 +807,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDif
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDifferentNumReturns_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@@ -815,6 +867,7 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDif
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenMismatchedKernel_withDifferentReturnTypes_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -36,11 +36,13 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
|
||||
// assert that schema and cpu kernel are present
|
||||
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
|
||||
ASSERT_TRUE(op.has_value());
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(6, result[0].toInt());
|
||||
}
|
||||
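One note on the mechanics that apply to every stub in this change: `// NOLINTNEXTLINE(check-name)` suppresses only the named clang-tidy checks, and only on the single line that follows, so each suppression stays local and auditable:

```
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);  // the literal 5 is exempted
// ...the very next line is checked as usual again.
```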

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", [] (const Tensor& tensor, int64_t input) -> int64_t {
return input + 1;
@ -48,6 +50,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistere
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegisteredInConstructor_thenCanBeCalled) {
auto registrar = RegisterOperators("_test::my_op(Tensor dummy, int input) -> int", [] (const Tensor& tensor, int64_t input) -> int64_t {
return input + 1;
@ -55,6 +58,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistere
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInOneRegistrar_thenCallsRightKernel) {
auto registrar = RegisterOperators()
.op("_test::my_op(Tensor dummy, int input) -> int", [] (const Tensor& tensor, int64_t input) -> int64_t {
@ -67,6 +71,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMultipleOperatorsAnd
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInMultipleRegistrars_thenCallsRightKernel) {
auto registrar1 = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", [] (const Tensor& tensor, int64_t input) -> int64_t {
return input + 1;
@ -78,6 +83,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMultipleOperatorsAnd
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistrationRunsOutOfScope_thenCannotBeCalledAnymore) {
{
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", [] (const Tensor& tensor, int64_t input) -> int64_t {
@ -91,8 +97,10 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistrat
expectDoesntFindOperator("_test::my_op");
}

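The out-of-scope test above relies on `RegisterOperators` acting as an RAII guard: destroying the registrar deregisters the operator. A minimal sketch, using the same dispatcher lookup the tests use:

```
{
  auto registrar = RegisterOperators().op(
      "_test::my_op(Tensor dummy, int input) -> int",
      [] (const Tensor&, int64_t input) -> int64_t { return input + 1; });
  // _test::my_op is registered and callable here.
}
// registrar destroyed: the operator is deregistered again.
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
EXPECT_FALSE(op.has_value());
```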
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool was_called = false;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::no_return(Tensor dummy) -> ()", [] (const Tensor&) -> void {
was_called = true;
@ -106,6 +114,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithoutOutput_
EXPECT_EQ(0, result.size());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithZeroOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::zero_outputs(Tensor dummy) -> ()", [] (const Tensor&) -> std::tuple<> {
was_called = true;
@ -120,6 +129,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithZeroOutput
EXPECT_EQ(0, result.size());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_output(Tensor dummy, int a, int b) -> int", [] (Tensor, int64_t a, int64_t b) -> int64_t {
@ -129,11 +139,13 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntOutput_
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::returning_tensor(Tensor input) -> Tensor", [] (const Tensor& input) -> Tensor {
@ -152,6 +164,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorOutp
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor input1, Tensor input2, Tensor input3) -> Tensor[]", [] (const Tensor& input1, const Tensor& input2, const Tensor& input3) -> std::vector<Tensor> {
@ -169,6 +182,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorList
EXPECT_EQ(DispatchKey::CPU, extractDispatchKey(result[0].toTensorVector()[2]));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor dummy, int input1, int input2, int input3) -> int[]", [](const Tensor&, int64_t input1, int64_t input2, int64_t input3) -> std::vector<int64_t> {
@ -178,6 +192,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOut
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -186,6 +201,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOut
EXPECT_EQ(6, result[0].toIntVector()[2]);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> {
@ -194,6 +210,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOu
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(c10::in_place, 0),
@ -218,6 +235,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOu
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result_dict.at("second")));
}
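These multiple-output tests follow one general rule: a schema return list such as `(Tensor, int)` maps to a `std::tuple<...>` in the kernel, and the boxed caller receives one IValue per declared return. A reduced sketch (the operator name `_test::two_outputs` is invented for illustration):

```
auto registrar = RegisterOperators().op(
    "_test::two_outputs(Tensor dummy) -> (Tensor, int)",
    RegisterOperators::options().catchAllKernel(
        [] (Tensor t) -> std::tuple<Tensor, int64_t> {
          return std::make_tuple(t, 42);  // one tuple element per declared return
        }));

auto op = c10::Dispatcher::singleton().findSchema({"_test::two_outputs", ""});
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU));
EXPECT_EQ(2, outputs.size());  // the tuple is flattened into two IValues
EXPECT_EQ(42, outputs[1].toInt());
```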

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInputByReference_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", [] (const Tensor& input1) -> Tensor {
@ -236,6 +254,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInpu
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInputByValue_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", [](Tensor input1) -> Tensor {
@ -254,8 +273,10 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInpu
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
Tensor captured_input;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInputByReference_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", [] (const Tensor& input1) -> void {
@ -274,6 +295,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInpu
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInputByValue_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", [] (Tensor input1) -> void {
@ -292,8 +314,10 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorInpu
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_int_input = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> ()", [](Tensor, int64_t input1) -> void {
@ -309,6 +333,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntInput_w
EXPECT_EQ(3, captured_int_input);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> int", [] (Tensor, int64_t input1) -> int64_t {
@ -323,8 +348,10 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntInput_w
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_input_list_size = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> ()", [] (Tensor, const std::vector<int64_t>& input1) -> void {
@ -335,11 +362,13 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInp
ASSERT_TRUE(op.has_value());

captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> int", [](Tensor, const std::vector<int64_t>& input1) -> int64_t {
@ -349,11 +378,13 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInp
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
}
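A detail worth keeping in mind for the list tests: at the boxed call site an `int[]` argument is passed as `c10::List<int64_t>`, while the deprecated lambda API still hands it to the kernel as `const std::vector<int64_t>&`. A condensed sketch of both sides:

```
// Kernel side (deprecated API): int[] arrives as a std::vector reference.
auto registrar = RegisterOperators().op(
    "_test::int_list_input(Tensor dummy, int[] input) -> int",
    [] (Tensor, const std::vector<int64_t>& input) -> int64_t {
      return static_cast<int64_t>(input.size());
    });

// Call site: int[] is passed as a c10::List.
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(3, outputs[0].toInt());  // the kernel saw three elements
```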

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", [] (const std::vector<Tensor>& input1) -> void {
@ -369,6 +400,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorList
EXPECT_EQ(2, captured_input_list_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorVectorInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", [] (const std::vector<Tensor>& input1) -> int64_t {
@ -383,6 +415,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorVect
EXPECT_EQ(2, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorVectorInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", [] (const std::vector<Tensor>& input1) -> void {
@ -398,6 +431,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTens
EXPECT_EQ(2, captured_input_list_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorVectorInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", [] (const std::vector<Tensor>& input1) -> int64_t {
@ -412,6 +446,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTens
EXPECT_EQ(2, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", [] (std::vector<Tensor> input1) -> void {
@ -427,6 +462,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTens
EXPECT_EQ(2, captured_input_list_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", [] (std::vector<Tensor> input1) -> int64_t {
@ -441,6 +477,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTens
EXPECT_EQ(2, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithStringListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::stringlist_output(str[] input) -> str[]", [](std::vector<std::string> input) {
@ -460,6 +497,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithStringList
EXPECT_EQ("value2", output.get(1).toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithDictInput_withoutOutput_whenRegistered_thenCanBeCalled) {
int captured_dict_size = 0;

@ -480,6 +518,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithDictInput_
EXPECT_EQ(2, captured_dict_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithDictInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", [&] (Dict<string, string> input1) {
@ -497,6 +536,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithDictInput_
EXPECT_EQ("value2", outputs[0].toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithDictOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", [] (Dict<string, string> input) {
@ -518,6 +558,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithDictOutput
EXPECT_EQ("value2", output.at("key2"));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithUnorderedMapInput_withoutOutput_whenRegistered_thenCanBeCalled) {
int captured_dict_size = 0;

@ -538,6 +579,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithUnorderedM
EXPECT_EQ(2, captured_dict_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithUnorderedMapInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", [&] (std::unordered_map<string, string> input1) {
@ -555,6 +597,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithUnorderedM
EXPECT_EQ("value2", outputs[0].toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithUnorderedMapOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", [] (std::unordered_map<string, string> input) {
@ -576,6 +619,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithUnorderedM
EXPECT_EQ("value2", output.at("key2"));
}

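As the dict tests above illustrate, the schema type `Dict(str, str)` corresponds to `c10::Dict<std::string, std::string>` in the modern API, with `std::unordered_map` still accepted by the deprecated one. A small sketch of building and passing one (same call helpers as the tests):

```
// Build a typed c10::Dict and pass it where the schema says "Dict(str, str)".
c10::Dict<std::string, std::string> dict;
dict.insert("key1", "value1");
dict.insert("key2", "value2");

auto op = c10::Dispatcher::singleton().findSchema({"_test::dict_input", ""});
ASSERT_TRUE(op.has_value());
auto outputs = callOp(*op, dict);  // the kernel sees a Dict (or unordered_map in the legacy API)
```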
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfList_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, int[]) input) -> Dict(str, int[])", [](std::unordered_map<string, std::vector<int64_t>> input) {
@ -586,7 +630,9 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfList_
ASSERT_TRUE(op.has_value());

c10::Dict<string, c10::List<int64_t>> dict;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key1", c10::List<int64_t>({10, 20}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key2", c10::List<int64_t>({30, 40}));
auto outputs = callOp(*op, dict);
EXPECT_EQ(1, outputs.size());
@ -602,6 +648,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfList_
}


// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfListOfMap_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, Dict(int,str)[]) input) -> Dict(str, Dict(int,str)[])", [](std::unordered_map<string, std::vector<std::unordered_map<int64_t, string>>> input) {
@ -613,11 +660,15 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfListO

c10::Dict<string, c10::List<c10::Dict<int64_t, string>>> dict;
c10::Dict<int64_t, string> dict1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(10, "10");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(20, "20");
dict.insert("key1", c10::List<c10::Dict<int64_t, string>>({dict1}));
c10::Dict<int64_t, string> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(30, "30");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(40, "40");
dict.insert("key2", c10::List<c10::Dict<int64_t, string>>({dict2}));
auto outputs = callOp(*op, dict);
@ -634,6 +685,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfListO
EXPECT_EQ("40", output.at("key2").get(0).at(40));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMap_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Dict(str, int)[] input) -> Dict(str, int)[]", [](std::vector<std::unordered_map<string, int64_t>> input) {
@ -663,6 +715,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMap_
EXPECT_EQ(4, output.get(1).toGenericDict().at("4").toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMapOfIntList_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Dict(str, int[])[] input) -> Dict(str, int[])[]", [](std::vector<std::unordered_map<string, std::vector<int64_t>>> input) {
@ -676,7 +729,9 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMapO
dict1.insert("1", c10::List<int64_t>({1, 2}));
dict1.insert("3", c10::List<int64_t>({3, 4}));
c10::Dict<string, c10::List<int64_t>> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("5", c10::List<int64_t>({5, 6}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("7", c10::List<int64_t>({7, 8}));
c10::List<c10::Dict<string, c10::List<int64_t>>> list({ dict1, dict2 });
auto outputs = callOp(*op, list);
@ -699,6 +754,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMapO
EXPECT_EQ(8, output.get(1).toGenericDict().at("7").toIntVector()[1]);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -715,6 +771,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenFallbackKernelWithou
EXPECT_TRUE(called);
}

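The note inside this test states the key constraint: dispatch keys are derived from tensor arguments, so an operator whose schema contains no tensors cannot select a backend-specific kernel and has to register a catch-all instead. A sketch of that situation (the operator name is invented for illustration):

```
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool catchall_called = false;

// No Tensor anywhere in the schema, so no dispatch key can be derived;
// only a catch-all kernel can serve this operator.
auto registrar = RegisterOperators().op(
    "_test::no_tensor_args(int arg) -> int",
    RegisterOperators::options().catchAllKernel(
        [] (int64_t arg) -> int64_t {
          catchall_called = true;
          return arg + 1;
        }));
```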
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenFallbackKernelWithoutTensorArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -730,7 +787,9 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenFallbackKernelWithou
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
@ -769,7 +828,9 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithOptionalIn
EXPECT_FALSE(called_arg4.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
@ -811,7 +872,9 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithOptionalIn
EXPECT_FALSE(called_arg4.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
@ -848,6 +911,7 @@ void expectCallsConcatUnboxed(DispatchKey dispatch_key) {
EXPECT_EQ("prefix123", result);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) {
std::string prefix = "prefix";
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", [&] (const Tensor& tensor1, std::string a, const std::string& b, int64_t c) {
@ -856,6 +920,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistere
expectCallsConcatUnboxed(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegisteredWithoutSpecifyingSchema_thenInfersSchema) {
auto registrar = RegisterOperators()
.op("_test::no_schema_specified", [] (Tensor arg1, int64_t arg2, const std::vector<Tensor>& arg3) -> std::tuple<int64_t, Tensor> {return {};});
@ -867,6 +932,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernel_whenRegistere
EXPECT_FALSE(differences.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_withDifferentNumArguments_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -903,6 +969,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_wit
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_withDifferentArgumentType_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -922,6 +989,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_wit
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_withDifferentNumReturns_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -981,6 +1049,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_wit
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenMismatchedKernel_withDifferentReturnTypes_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()

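Context for the optional-input tests above: the schema types `Tensor?`, `int?`, and `str?` surface as `c10::optional<...>` parameters, and an `IValue` None at the boxed call site arrives as `c10::nullopt`. A trimmed-down sketch of the round trip:

```
auto registrar = RegisterOperators().op(
    "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
    RegisterOperators::options().catchAllKernel(
        [] (Tensor, const c10::optional<Tensor>& arg2,
            c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
          // arg2/arg3/arg4 are nullopt whenever None was passed in.
        }));

auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
// Pass a real tensor for arg2 but None for arg3 and arg4:
callOp(*op, dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CPU),
       c10::IValue(), c10::IValue());
```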
@ -26,6 +26,7 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -37,22 +38,26 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor, int64_t i) {return i+1;}));
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenOutOfLineKernel_whenRegistered_thenCanBeCalled) {
auto my_kernel = [] (Tensor, int64_t i) {return i+1;};
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel(DispatchKey::CPU, my_kernel));
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInOneRegistrar_thenCallsRightKernel) {
auto registrar = RegisterOperators()
.op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor, int64_t i) {return i+1;})
@ -62,6 +67,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMultipleOperatorsAndKernel
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInMultipleRegistrars_thenCallsRightKernel) {
auto registrar1 = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor, int64_t i) {return i+1;})
.kernel(DispatchKey::CUDA, [] (Tensor, int64_t) -> int64_t {EXPECT_TRUE(false); return 0;}));
@ -70,6 +76,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMultipleOperatorsAndKernel
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegistrationRunsOutOfScope_thenCannotBeCalledAnymore) {
{
auto m = MAKE_TORCH_LIBRARY(_test);
@ -94,8 +101,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegistrationRun
expectDoesntFindOperator("_test::my_op");
}

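These `OperatorRegistrationTest_LambdaBasedKernel` cases all share one shape: `options().kernel(DispatchKey, lambda)` binds one kernel per backend on a single operator, and dispatch picks the kernel matching the input tensor's key. A sketch mirroring the tests (the CUDA kernel deliberately fails to prove it is never reached from a CPU call):

```
auto registrar = RegisterOperators().op(
    "_test::my_op(Tensor dummy, int input) -> int",
    RegisterOperators::options()
        .kernel(DispatchKey::CPU, [] (Tensor, int64_t i) { return i + 1; })
        .kernel(DispatchKey::CUDA, [] (Tensor, int64_t) -> int64_t {
          EXPECT_TRUE(false);  // a CPU-keyed call must never land here
          return 0;
        }));
expectCallsIncrement(DispatchKey::CPU);  // dispatches on dummyTensor's key
```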
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool was_called = false;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::no_return(Tensor dummy) -> ()",
RegisterOperators::options()
@ -109,6 +118,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithoutOutput_whenRe
EXPECT_EQ(0, result.size());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithZeroOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::zero_outputs(Tensor dummy) -> ()",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (const Tensor&) -> std::tuple<> {was_called = true; return {};}));
@ -121,6 +131,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithZeroOutputs_when
EXPECT_EQ(0, result.size());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_output(Tensor dummy, int a, int b) -> int",
@ -129,11 +140,13 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntOutput_whenRe
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::returning_tensor(Tensor input) -> Tensor",
@ -152,6 +165,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorOutput_whe
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor input1, Tensor input2, Tensor input3) -> Tensor[]",
@ -168,6 +182,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListOutput
EXPECT_EQ(DispatchKey::CPU, extractDispatchKey(result[0].toTensorVector()[2]));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor dummy, int input1, int input2, int input3) -> int[]",
@ -176,6 +191,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_wh
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -184,6 +200,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_wh
EXPECT_EQ(6, result[0].toIntVector()[2]);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))",
@ -193,6 +210,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(c10::in_place, 0),
@ -217,6 +235,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result_dict.at("second")));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByReference_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor",
@ -235,6 +254,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByRef
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByValue_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor",
@ -253,8 +273,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByVal
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
Tensor captured_input;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByReference_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()",
@ -273,6 +295,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByRef
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByValue_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()",
@ -291,8 +314,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorInputByVal
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_int_input = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> ()",
@ -307,6 +332,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntInput_without
EXPECT_EQ(3, captured_int_input);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> int",
@ -320,8 +346,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntInput_withOut
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_input_list_size = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> ()",
@ -331,11 +359,13 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_wit
ASSERT_TRUE(op.has_value());

captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> int",
@ -344,11 +374,13 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_wit
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()",
@ -363,6 +395,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListInput_
EXPECT_EQ(2, captured_input_list_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int",
@ -376,8 +409,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListInput_
EXPECT_EQ(2, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int captured_dict_size = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithDictInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, Tensor) input) -> ()", RegisterOperators::options().catchAllKernel([] (Dict<string, Tensor> input1) {
@ -396,6 +431,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithDictInput_withou
EXPECT_EQ(2, captured_dict_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithDictInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", RegisterOperators::options().catchAllKernel([] (Dict<string, string> input1) {
@ -413,6 +449,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithDictInput_withOu
EXPECT_EQ("value2", outputs[0].toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithDictOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", RegisterOperators::options().catchAllKernel([] (Dict<string, string> input) {
@ -434,8 +471,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithDictOutput_whenR
EXPECT_EQ("value2", output.at("key2"));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called = false;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -451,6 +490,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenFallbackKernelWithoutAnyAr
EXPECT_TRUE(called);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenFallbackKernelWithoutTensorArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -466,10 +506,14 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenFallbackKernelWithoutTenso
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<Tensor> called_arg2 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<int64_t> called_arg3 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<std::string> called_arg4 = c10::nullopt;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
@ -504,6 +548,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithOptionalInputs_w
EXPECT_FALSE(called_arg4.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
@ -541,6 +586,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithOptionalInputs_w
EXPECT_FALSE(called_arg4.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
@ -573,6 +619,7 @@ void expectCallsConcatUnboxed(DispatchKey dispatch_key) {
EXPECT_EQ("123", result);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", torch::RegisterOperators::options()
.kernel(DispatchKey::CPU, [] (const Tensor& tensor1, std::string a, const std::string& b, int64_t c) {
@ -581,6 +628,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegistered_then
expectCallsConcatUnboxed(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegisteredWithoutSpecifyingSchema_thenInfersSchema) {
auto registrar = RegisterOperators()
.op("_test::no_schema_specified", RegisterOperators::options().catchAllKernel([] (Tensor arg1, int64_t arg2, const c10::List<Tensor>& arg3) -> std::tuple<int64_t, Tensor> {return {};}));
@ -592,6 +640,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernel_whenRegisteredWitho
EXPECT_FALSE(differences.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDifferentNumArguments_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -628,6 +677,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDiffe
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDifferentArgumentType_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -647,6 +697,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDiffe
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDifferentNumReturns_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -706,6 +757,7 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDiffe
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenMismatchedKernel_withDifferentReturnTypes_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()

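The next file switches to stack-based ("boxed") kernels: instead of a typed signature, the kernel receives the whole argument stack of IValues, pops its inputs, and pushes its outputs, which is also why schema inference fails for this kernel style later in the file. A sketch of the shape these kernels take, mirroring the `incrementKernel` the tests register:

```
// Boxed kernel: the stack holds [Tensor dummy, int input] on entry and
// must hold exactly the declared returns on exit.
void incrementKernel(const OperatorHandle&, torch::jit::Stack* stack) {
  int64_t input = torch::jit::pop(*stack).toInt();  // last argument first
  torch::jit::pop(*stack);                          // discard the dummy tensor
  torch::jit::push(*stack, input + 1);              // single int return
}

static auto registrar = RegisterOperators().op(
    "_test::my_op(Tensor dummy, int input) -> int",
    RegisterOperators::options().kernel<&incrementKernel>(DispatchKey::CPU));
```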
@ -34,6 +34,7 @@ void decrementKernel(const OperatorHandle&, Stack* stack) {
torch::jit::push(*stack, input - 1);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called_redispatching_kernel = false;
void redispatchingKernel_with_DispatchKeySet(const OperatorHandle& op, c10::DispatchKeySet ks, Stack* stack) {
// this kernel is a no-op- it just redispatches to the lower-priority kernel
@ -48,6 +49,7 @@ void expectCallsIncrement(c10::DispatchKeySet ks) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(ks), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -63,6 +65,7 @@ void expectCallsIncrementUnboxed(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t result = callOpUnboxed<int64_t, at::Tensor, int64_t>(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(6, result);
}
@ -73,16 +76,19 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<&incrementKernel>(DispatchKey::CPU));
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInOneRegistrar_thenCallsRightKernel) {
auto registrar = RegisterOperators()
.op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<&incrementKernel>(DispatchKey::CPU)
@ -92,6 +98,7 @@ TEST(OperatorRegistrationTest_StackBasedKernel, givenMultipleOperatorsAndKernels
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInMultipleRegistrars_thenCallsRightKernel) {
auto registrar1 = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<&incrementKernel>(DispatchKey::CPU)
.kernel<&errorKernel>(DispatchKey::CUDA));
@ -100,6 +107,7 @@ TEST(OperatorRegistrationTest_StackBasedKernel, givenMultipleOperatorsAndKernels
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenKernel_whenRegistrationRunsOutOfScope_thenCannotBeCalledAnymore) {
{
auto m = MAKE_TORCH_LIBRARY(_test);
@ -124,12 +132,14 @@ TEST(OperatorRegistrationTest_StackBasedKernel, givenKernel_whenRegistrationRuns
expectDoesntFindOperator("_test::my_op");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called = false;

void kernelWithoutInputs(const OperatorHandle&, Stack*) {
called = true;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -149,6 +159,7 @@ void kernelWithoutTensorInputs(const OperatorHandle&, Stack* stack) {
stack->back() = stack->back().toInt() + 1;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenFallbackKernelWithoutTensorArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -167,17 +178,20 @@ TEST(OperatorRegistrationTest_StackBasedKernel, givenFallbackKernelWithoutTensor
void kernelForSchemaInference(const OperatorHandle&, Stack* stack) {
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenKernel_whenRegisteredWithoutSpecifyingSchema_thenFailsBecauseItCannotInferFromStackBasedKernel) {
expectThrows<c10::Error>([] {
RegisterOperators().op("_test::no_schema_specified", RegisterOperators::options().catchAllKernel<&kernelForSchemaInference>());
}, "Cannot infer operator schema for this kind of kernel in registration of operator _test::no_schema_specified");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, givenKernel_whenRegistered_thenCanAlsoBeCalledUnboxed) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<&incrementKernel>(DispatchKey::CPU));
expectCallsIncrementUnboxed(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_StackBasedKernel, callKernelsWithDispatchKeySetConvention_redispatchesToLowerPriorityKernels) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("my_op(Tensor dummy, int input) -> int");
|
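
The `cppcoreguidelines-avoid-non-const-global-variables` stubs above sit on gtest `TEST(...)` lines because each `TEST` macro expands to, among other things, a non-const global object that registers the test; the check sees that hidden global. A minimal sketch of what the check flags and how test code typically suppresses it (the names here are illustrative, not from the PyTorch sources):

```
#include <cstdint>

// clang-tidy flags this: a mutable global is shared, hidden state.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t call_count = 0;

// A function-local static is one check-clean alternative.
int64_t& callCount() {
  static int64_t count = 0;  // not flagged: not a global variable
  return count;
}

int main() {
  ++call_count;   // allowed only because of the suppression above
  ++callCount();  // allowed without any suppression
  return 0;
}
```
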
@ -46,6 +46,7 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -57,16 +58,19 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<IncrementKernel>(DispatchKey::CPU));
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInOneRegistrar_thenCallsRightKernel) {
auto registrar = RegisterOperators()
.op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<IncrementKernel>(DispatchKey::CPU)
@ -76,6 +80,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMultipleOperatorsAndKerne
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMultipleOperatorsAndKernels_whenRegisteredInMultipleRegistrars_thenCallsRightKernel) {
auto registrar1 = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<IncrementKernel>(DispatchKey::CPU)
.kernel<ErrorKernel>(DispatchKey::CUDA));
@ -84,6 +89,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMultipleOperatorsAndKerne
expectCallsIncrement(DispatchKey::CPU);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool was_called = false;

struct KernelWithoutOutput final : OperatorKernel {
@ -92,6 +98,7 @@ struct KernelWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::no_return(Tensor dummy) -> ()", RegisterOperators::options().kernel<KernelWithoutOutput>(DispatchKey::CPU));

@ -110,6 +117,7 @@ struct KernelWithZeroOutputs final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithZeroOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::zero_outputs(Tensor dummy) -> ()", RegisterOperators::options().kernel<KernelWithZeroOutputs>(DispatchKey::CPU));

@ -127,6 +135,7 @@ struct KernelWithIntOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_output(Tensor dummy, int a, int b) -> int", RegisterOperators::options().kernel<KernelWithIntOutput>(DispatchKey::CPU));
@ -134,6 +143,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntOutput_whenR
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@ -145,6 +155,7 @@ struct KernelWithTensorOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::returning_tensor(Tensor input) -> Tensor", RegisterOperators::options().kernel<KernelWithTensorOutput>(DispatchKey::CPU)
@ -168,6 +179,7 @@ struct KernelWithTensorListOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor input1, Tensor input2, Tensor input3) -> Tensor[]", RegisterOperators::options().kernel<KernelWithTensorListOutput>(DispatchKey::CUDA));
@ -189,6 +201,7 @@ struct KernelWithIntListOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::list_output(Tensor dummy, int input1, int input2, int input3) -> int[]", RegisterOperators::options().kernel<KernelWithIntListOutput>(DispatchKey::CPU));
@ -196,6 +209,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListOutput_w
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -211,6 +225,7 @@ struct KernelWithMultipleOutputs final : OperatorKernel {
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(c10::in_place, 0),
@ -219,6 +234,7 @@ struct KernelWithMultipleOutputs final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", RegisterOperators::options().kernel<KernelWithMultipleOutputs>(DispatchKey::CPU));
@ -252,6 +268,7 @@ struct KernelWithTensorInputByValueWithOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByReference_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", RegisterOperators::options().kernel<KernelWithTensorInputByReferenceWithOutput>(DispatchKey::CPU)
@ -269,6 +286,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByRe
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByValue_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> Tensor", RegisterOperators::options().kernel<KernelWithTensorInputByValueWithOutput>(DispatchKey::CPU)
@ -286,6 +304,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByVa
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(result[0].toTensor()));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
Tensor captured_input;

struct KernelWithTensorInputByReferenceWithoutOutput final : OperatorKernel {
@ -300,6 +319,7 @@ struct KernelWithTensorInputByValueWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByReference_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", RegisterOperators::options().kernel<KernelWithTensorInputByReferenceWithoutOutput>(DispatchKey::CPU)
@ -317,6 +337,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByRe
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByValue_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_input(Tensor input) -> ()", RegisterOperators::options().kernel<KernelWithTensorInputByValueWithoutOutput>(DispatchKey::CPU)
@ -334,6 +355,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorInputByVa
EXPECT_EQ(DispatchKey::CUDA, extractDispatchKey(captured_input));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_int_input = 0;

struct KernelWithIntInputWithoutOutput final : OperatorKernel {
@ -342,6 +364,7 @@ struct KernelWithIntInputWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> ()", RegisterOperators::options().kernel<KernelWithIntInputWithoutOutput>(DispatchKey::CPU));
@ -361,6 +384,7 @@ struct KernelWithIntInputWithOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_input(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<KernelWithIntInputWithOutput>(DispatchKey::CPU));
@ -373,6 +397,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntInput_withOu
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int64_t captured_input_list_size = 0;

struct KernelWithIntListInputWithoutOutput final : OperatorKernel {
@ -381,6 +406,7 @@ struct KernelWithIntListInputWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> ()", RegisterOperators::options().kernel<KernelWithIntListInputWithoutOutput>(DispatchKey::CPU));
@ -389,6 +415,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_wi
ASSERT_TRUE(op.has_value());

captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@ -400,6 +427,7 @@ struct KernelWithIntListInputWithOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::int_list_input(Tensor dummy, int[] input) -> int", RegisterOperators::options().kernel<KernelWithIntListInputWithOutput>(DispatchKey::CPU));
@ -407,6 +435,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_wi
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
@ -418,6 +447,7 @@ struct KernelWithTensorListInputWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorListInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> ()", RegisterOperators::options().kernel<KernelWithTensorListInputWithoutOutput>(DispatchKey::CPU));
@ -437,6 +467,7 @@ struct KernelWithTensorListInputWithOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorListInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tensor_list_input(Tensor[] input) -> int", RegisterOperators::options().kernel<KernelWithTensorListInputWithOutput>(DispatchKey::CPU));
@ -449,6 +480,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorListInput
EXPECT_EQ(2, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
int captured_dict_size = 0;

struct KernelWithDictInputWithoutOutput final : OperatorKernel {
@ -457,6 +489,7 @@ struct KernelWithDictInputWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithDictInput_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, Tensor) input) -> ()", RegisterOperators::options().catchAllKernel<KernelWithDictInputWithoutOutput>());
@ -479,6 +512,7 @@ struct KernelWithDictInputWithOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithDictInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_input(Dict(str, str) input) -> str", RegisterOperators::options().catchAllKernel<KernelWithDictInputWithOutput>());
@ -500,6 +534,7 @@ struct KernelWithDictOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithDictOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::dict_output(Dict(str, str) input) -> Dict(str, str)", RegisterOperators::options().catchAllKernel<KernelWithDictOutput>());
@ -536,6 +571,7 @@ struct KernelWithTupleInput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTupleInput_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::tuple_input((str, int, float) input) -> str", RegisterOperators::options().catchAllKernel<KernelWithTupleInput>());
@ -543,12 +579,14 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTupleInput_with
auto op = c10::Dispatcher::singleton().findSchema({"_test::tuple_input", ""});
ASSERT_TRUE(op.has_value());

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::tuple<string, int64_t, float> tup{"foobar", 123, 420.1337};
auto outputs = callOp(*op, tup);
EXPECT_EQ(1, outputs.size());
EXPECT_EQ("foobar", outputs[0].toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithCache_thenCacheIsKeptCorrectly) {
auto registrar = RegisterOperators()
.op("_test::cache_op(Tensor input) -> int", RegisterOperators::options().kernel<KernelWithCache>(DispatchKey::CPU));
@ -588,6 +626,7 @@ private:
int64_t offset_;
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithConstructorArg_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::offset_op(Tensor tensor, int input) -> int", RegisterOperators::options().kernel<KernelWithConstructorArg>(DispatchKey::CPU, 2)
@ -618,9 +657,11 @@ private:
int64_t offset_;
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithMultipleConstructorArgs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::offset_op(Tensor tensor, int input) -> int", RegisterOperators::options().kernel<KernelWithMultipleConstructorArgs>(DispatchKey::CPU, 2, 3)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
.kernel<KernelWithMultipleConstructorArgs>(DispatchKey::CUDA, 4, 5));

auto op = c10::Dispatcher::singleton().findSchema({"_test::offset_op", ""});
@ -635,6 +676,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithMultipleConstru
EXPECT_EQ(13, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called = false;

struct KernelWithoutInputs final : OperatorKernel {
@ -643,6 +685,7 @@ struct KernelWithoutInputs final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -664,6 +707,7 @@ struct KernelWithoutTensorInputs final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenFallbackKernelWithoutTensorArguments_whenRegistered_thenCanBeCalled) {
// note: non-fallback kernels without tensor arguments don't work because there
// is no way to get the dispatch key. For operators that only have a fallback
@ -679,8 +723,11 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenFallbackKernelWithoutTens
EXPECT_EQ(4, outputs[0].toInt());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<Tensor> called_arg2 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<int64_t> called_arg3 = c10::nullopt;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
c10::optional<std::string> called_arg4 = c10::nullopt;

struct KernelWithOptInputWithoutOutput final : OperatorKernel {
@ -692,6 +739,7 @@ struct KernelWithOptInputWithoutOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", RegisterOperators::options().kernel<KernelWithOptInputWithoutOutput>(DispatchKey::CPU));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -729,6 +777,7 @@ struct KernelWithOptInputWithOutput final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?", RegisterOperators::options().kernel<KernelWithOptInputWithOutput>(DispatchKey::CPU));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -765,6 +814,7 @@ struct KernelWithOptInputWithMultipleOutputs final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op("_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", RegisterOperators::options().kernel<KernelWithOptInputWithMultipleOutputs>(DispatchKey::CPU));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -803,6 +853,7 @@ void expectCallsConcatUnboxed(DispatchKey dispatch_key) {
EXPECT_EQ("prefix123", result);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) {
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", RegisterOperators::options().kernel<ConcatKernel>(DispatchKey::CPU, "prefix"));
expectCallsConcatUnboxed(DispatchKey::CPU);
@ -814,6 +865,7 @@ struct KernelForSchemaInference final : OperatorKernel {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernel_whenRegisteredWithoutSpecifyingSchema_thenInfersSchema) {
auto registrar = RegisterOperators()
.op("_test::no_schema_specified", RegisterOperators::options().kernel<KernelForSchemaInference>(DispatchKey::CPU));
@ -825,6 +877,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernel_whenRegisteredWith
EXPECT_FALSE(differences.has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernel_whenRegisteredCatchAllWithoutSpecifyingSchema_thenInfersSchema) {
auto registrar = RegisterOperators()
.op("_test::no_schema_specified", RegisterOperators::options().catchAllKernel<KernelForSchemaInference>());
@ -843,6 +896,7 @@ template<class... Args> struct KernelFunc<void, Args...> final : OperatorKernel
void operator()(Args...) {}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDifferentNumArguments_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -879,6 +933,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDiff
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDifferentArgumentType_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -898,6 +953,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDiff
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDifferentNumReturns_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
@ -957,6 +1013,7 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDiff
);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenMismatchedKernel_withDifferentReturnTypes_whenRegistering_thenFails) {
// assert this does not fail because it matches
RegisterOperators()
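
Most of the remaining stubs in this file are `cppcoreguidelines-avoid-magic-numbers` suppressions on bare literals such as `5`, `2, 4, 6`, or `420.1337`. In test code the literal usually is the fixture, so a suppression is pragmatic; in production code the check-clean fix is a named constant. A small illustration (not from the PyTorch sources):

```
#include <cassert>

int increment(int x) { return x + 1; }

int main() {
  // Flagged: the bare 5 and 6 carry no self-documenting meaning.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  assert(increment(5) == 6);

  // Check-clean alternative: name the quantities.
  constexpr int kInput = 5;
  constexpr int kExpected = 6;
  assert(increment(kInput) == kExpected);
  return 0;
}
```
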
@ -6,17 +6,20 @@ using c10::impl::CppSignature;

namespace {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CppSignatureTest, given_equalSignature_then_areEqual) {
EXPECT_EQ(CppSignature::make<void()>(), CppSignature::make<void()>());
EXPECT_EQ(CppSignature::make<int64_t(std::string, int64_t)>(), CppSignature::make<int64_t(std::string, int64_t)>());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CppSignatureTest, given_differentSignature_then_areDifferent) {
EXPECT_NE(CppSignature::make<void()>(), CppSignature::make<int64_t()>());
EXPECT_NE(CppSignature::make<int64_t(std::string)>(), CppSignature::make<int64_t(std::string, int64_t)>());
EXPECT_NE(CppSignature::make<std::string(std::string)>(), CppSignature::make<int64_t(std::string)>());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CppSignatureTest, given_equalFunctorAndFunction_then_areEqual) {
struct Functor final {
int64_t operator()(std::string) {return 0;}
@ -24,6 +27,7 @@ TEST(CppSignatureTest, given_equalFunctorAndFunction_then_areEqual) {
EXPECT_EQ(CppSignature::make<Functor>(), CppSignature::make<int64_t(std::string)>());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CppSignatureTest, given_differentFunctorAndFunction_then_areDifferent) {
struct Functor final {
int64_t operator()(std::string) {return 0;}
@ -31,6 +35,7 @@ TEST(CppSignatureTest, given_differentFunctorAndFunction_then_areDifferent) {
EXPECT_NE(CppSignature::make<Functor>(), CppSignature::make<int64_t(std::string, int64_t)>());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CppSignatureTest, given_cppSignature_then_canQueryNameWithoutCrashing) {
CppSignature::make<void(int64_t, const int64_t&)>().name();
}
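
The `CppSignatureTest` hunks above exercise `c10::impl::CppSignature`, which records a kernel's C++ signature so the dispatcher can detect mismatched unboxed calls (the "Mismatch in kernel C++ signatures" errors tested later in this diff). A rough sketch of the comparisons these tests rely on, assuming the internal `ATen/core/dispatch/CppSignature.h` header path; this is an internal API, not a supported public one:

```
#include <ATen/core/dispatch/CppSignature.h>
#include <cassert>
#include <string>

using c10::impl::CppSignature;

int main() {
  // Identical function types compare equal...
  assert(CppSignature::make<int64_t(std::string)>() ==
         CppSignature::make<int64_t(std::string)>());
  // ...while differing arity or types compare unequal.
  assert(CppSignature::make<int64_t(std::string)>() !=
         CppSignature::make<int64_t(std::string, int64_t)>());
  // name() yields a printable form for diagnostics.
  std::string n = CppSignature::make<void(int64_t)>().name();
  return n.empty() ? 1 : 0;
}
```
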
@ -32,6 +32,7 @@ private:
};
}

// NOLINTNEXTLINE(modernize-use-equals-default)
OpRegistrationListener::~OpRegistrationListener() {}

Dispatcher::Dispatcher()
@ -41,6 +42,7 @@ Dispatcher::Dispatcher()
, listeners_(std::make_unique<detail::RegistrationListenerList>())
, mutex_() {}

// NOLINTNEXTLINE(modernize-use-equals-default)
Dispatcher::~Dispatcher() {}

C10_EXPORT Dispatcher& Dispatcher::realSingleton() {
@ -201,6 +203,7 @@ RegistrationHandleRAII Dispatcher::registerImpl(
*this,
dispatch_key,
std::move(kernel),
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(cpp_signature),
std::move(inferred_function_schema),
std::move(debug)
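
`modernize-use-equals-default` fires on the empty destructor bodies above (`OpRegistrationListener::~OpRegistrationListener() {}` and `Dispatcher::~Dispatcher() {}`): the check prefers an explicitly defaulted definition, which can still live out of line. A generic before/after sketch:

```
struct Widget {
  ~Widget();  // declared here, defined out of line
};

// Flagged spelling: an empty user-provided body.
// NOLINTNEXTLINE(modernize-use-equals-default)
Widget::~Widget() {}

struct Gadget {
  ~Gadget();
};

// Check-clean spelling: defaulted, still out of line, so this
// translation unit remains the destructor's home.
Gadget::~Gadget() = default;

int main() {
  Widget w;
  Gadget g;
  (void)w;
  (void)g;
  return 0;
}
```
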
@ -51,6 +51,7 @@ const AnnotatedKernel OperatorEntry::ambiguousAutogradOtherKernel_ = AnnotatedKe

void OperatorEntry::registerSchema(FunctionSchema&& schema, std::string&& debug) {
TORCH_INTERNAL_ASSERT(!schema_.has_value());
// NOLINTNEXTLINE(modernize-loop-convert)
for (auto i = kernels_.begin(); i != kernels_.end(); ++i) {
for (auto j = i->second.begin(); j != i->second.end(); ++j) {
if (j->inferred_function_schema != nullptr) {
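
`modernize-loop-convert` suggests a range-based `for` wherever an iterator loop only ever dereferences its iterator, as in the nested loop over `kernels_` above, which the commit suppresses rather than rewrites. The mechanical rewrite would look roughly like this, sketched over a stand-in container rather than the real `kernels_` type:

```
#include <list>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::list<int>> kernels;
  kernels["cpu"] = {1, 2, 3};

  int sum = 0;
  // Iterator form, as flagged by modernize-loop-convert:
  // NOLINTNEXTLINE(modernize-loop-convert)
  for (auto i = kernels.begin(); i != kernels.end(); ++i) {
    for (auto j = i->second.begin(); j != i->second.end(); ++j) {
      sum += *j;
    }
  }

  // Range-based equivalent the check would prefer:
  for (auto& entry : kernels) {
    for (int v : entry.second) {
      sum += v;
    }
  }
  return sum == 12 ? 0 : 1;
}
```
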
@ -18,6 +18,7 @@ namespace {
// but this could be used as a starting point to do more interesting things.

// Global counter for ease of testing
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static int64_t override_call_count = 0;

// Mode implementation
@ -80,6 +81,7 @@ void generic_wrapper_fallback(const c10::OperatorHandle& op, torch::jit::Stack*
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BackendFallbackTest, TestBackendFallbackWithMode) {
auto m = MAKE_TORCH_LIBRARY_IMPL(_, TESTING_ONLY_GenericMode);
m.fallback(torch::CppFunction::makeFromBoxedFunction<&generic_mode_fallback>());
@ -87,21 +89,27 @@ TEST(BackendFallbackTest, TestBackendFallbackWithMode) {
c10::impl::IncludeDispatchKeyGuard guard(DispatchKey::TESTING_ONLY_GenericMode);

override_call_count = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = ones({5, 5}, kDouble);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false);
ASSERT_EQ(override_call_count, 2);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BackendFallbackTest, TestBackendFallbackWithWrapper) {
auto m = MAKE_TORCH_LIBRARY_IMPL(_, TESTING_ONLY_GenericWrapper);
m.fallback(torch::CppFunction::makeFromBoxedFunction<&generic_wrapper_fallback>());

override_call_count = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = at::detail::make_tensor<GenericWrapperTensorImpl>(ones({5, 5}, kDouble));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false);
ASSERT_EQ(override_call_count, 1);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BackendFallbackTest, TestFallthroughBackendFallback) {
auto m = MAKE_TORCH_LIBRARY_IMPL(aten, TESTING_ONLY_GenericMode);
m.impl("mul.Tensor", torch::CppFunction::makeFromBoxedFunction<&generic_mode_fallback>());
@ -113,6 +121,7 @@ TEST(BackendFallbackTest, TestFallthroughBackendFallback) {

override_call_count = 0;
// Doesn't trigger, as we fallthrough
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = zeros({5, 5}, kDouble);
ASSERT_EQ(override_call_count, 0);
// Does trigger, because we explicitly set it
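
The backend-fallback tests above install one boxed function as the handler for every operator on a dispatch key. Stripped to its core, and following the pattern of `generic_mode_fallback` in this test file (the gtest/ATen harness around it is assumed, and `SketchOnly` is a hypothetical test name), the mechanism is roughly:

```
void counting_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  override_call_count++;  // the test-global counter from the hunk above
  // Drop the mode key before re-dispatching, or this would recurse forever.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::TESTING_ONLY_GenericMode);
  op.callBoxed(stack);  // forward to the next kernel in priority order
}

TEST(BackendFallbackTest, SketchOnly) {
  // "_" wildcard: the fallback applies to all namespaces on this key.
  auto m = MAKE_TORCH_LIBRARY_IMPL(_, TESTING_ONLY_GenericMode);
  m.fallback(torch::CppFunction::makeFromBoxedFunction<&counting_fallback>());

  c10::impl::IncludeDispatchKeyGuard guard(DispatchKey::TESTING_ONLY_GenericMode);
  override_call_count = 0;
  Tensor t = ones({2, 2}, kDouble);  // one op call, routed through the fallback
  ASSERT_EQ(override_call_count, 1);
}
```
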
@ -53,6 +53,7 @@ Symbol InternedStrings::ns(Symbol sym) {
#define DEFINE_CASE(ns, s) \
case static_cast<unique_t>(ns::s): \
return namespaces::ns;
// NOLINTNEXTLINE(bugprone-branch-clone)
FORALL_NS_SYMBOLS(DEFINE_CASE)
#undef DEFINE_CASE
default: {
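
`bugprone-branch-clone` fires when several branches of an `if`/`switch` have identical bodies, which is exactly what a macro-generated `switch` like `FORALL_NS_SYMBOLS(DEFINE_CASE)` can expand to once different symbols map to the same namespace. A hand-written illustration of the same shape:

```
#include <cstdio>

const char* category(int code) {
  switch (code) {
    // Identical bodies below look like a copy-paste bug to clang-tidy,
    // even when the duplication is intentional, as in generated code.
    // NOLINTNEXTLINE(bugprone-branch-clone)
    case 1:
      return "known";
    case 2:
      return "known";
    default:
      return "unknown";
  }
}

int main() {
  std::printf("%s\n", category(2));
  return 0;
}
```
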
@ -337,6 +337,7 @@ size_t IValue::hash(const IValue& v) {
// Tensor __hash__ is equivalent to `id()`, so take the pointer value of
// the tensor to emulate it
return c10::get_hash(v.payload.as_tensor.unsafeGetTensorImpl());
// NOLINTNEXTLINE(bugprone-branch-clone)
case Tag::Storage:
return c10::get_hash(v.payload.u.as_int);
case Tag::Int:
@ -507,6 +508,7 @@ std::ostream& IValue::repr(
case IValue::Tag::Double: {
double d = v.toDouble();
int c = std::fpclassify(d);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((c == FP_NORMAL || c == FP_ZERO ) && std::abs(d) < 1e10) {
int64_t i = int64_t(d);
if (double(i) == d) {
@ -34,6 +34,7 @@ namespace {

CppFunction::CppFunction(c10::KernelFunction func, c10::optional<c10::impl::CppSignature> cpp_signature, std::unique_ptr<c10::FunctionSchema> schema)
: func_(std::move(func))
// NOLINTNEXTLINE(performance-move-const-arg)
, cpp_signature_(std::move(cpp_signature))
, schema_(std::move(schema))
, debug_()
@ -152,6 +153,7 @@ Library& Library::_def(c10::either<c10::OperatorName, c10::FunctionSchema>&& nam
std::move(name),
dispatch_key,
std::move(f.func_),
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(f.cpp_signature_),
std::move(f.schema_),
debugString(std::move(f.debug_), file_, line_)
@ -197,6 +199,7 @@ Library& Library::_impl(const char* name_str, CppFunction&& f) & {
std::move(name),
dispatch_key,
std::move(f.func_),
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(f.cpp_signature_),
std::move(f.schema_),
debugString(std::move(f.debug_), file_, line_)
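
The `performance-move-const-arg` stubs above cover `std::move(cpp_signature)` and friends: the check warns when `std::move` is applied to something it cannot actually move, such as a const object or a trivially copyable type (likely the case for `c10::optional<CppSignature>`, which wraps little more than a `std::type_index`), so the move silently degrades to a copy. A generic illustration:

```
#include <string>
#include <utility>

void consume(std::string) {}

int main() {
  const std::string fixed = "hello";
  // Flagged: moving a const object always copies; std::move is noise here.
  // NOLINTNEXTLINE(performance-move-const-arg)
  consume(std::move(fixed));

  std::string movable = "world";
  consume(std::move(movable));  // fine: a genuine move
  return 0;
}
```
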
@ -11,6 +11,7 @@ std::vector<Argument> createArgumentVector(c10::ArrayRef<ArgumentDef> args) {
result.reserve(args.size());
for (size_t i = 0; i < args.size(); ++i) {
// Arguments are named "_<index>"
// NOLINTNEXTLINE(modernize-use-emplace)
result.push_back(Argument("_" + c10::guts::to_string(i), (*args[i].getTypeFn)()));
}
return result;
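
`modernize-use-emplace` prefers `emplace_back(args...)`, which constructs the element in place, over `push_back(T(args...))`, which builds a temporary and then moves it in; the suppressed line above keeps the `push_back(Argument(...))` spelling. A generic before/after, with a stand-in `Argument` type:

```
#include <string>
#include <utility>
#include <vector>

struct Argument {
  Argument(std::string name, int type) : name_(std::move(name)), type_(type) {}
  std::string name_;
  int type_;
};

int main() {
  std::vector<Argument> result;
  result.reserve(2);

  // Flagged: constructs a temporary Argument, then moves it into the vector.
  // NOLINTNEXTLINE(modernize-use-emplace)
  result.push_back(Argument("_0", 1));

  // Preferred: forwards the arguments and constructs in place.
  result.emplace_back("_1", 2);
  return static_cast<int>(result.size()) - 2;
}
```
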
@ -98,6 +98,7 @@ void RegisterOperators::registerOp_(Options&& options) {

for (auto& kernel : options.kernels) {
registrars_.emplace_back(
// NOLINTNEXTLINE(performance-move-const-arg)
Dispatcher::singleton().registerImpl(op_name, kernel.dispatch_key, std::move(kernel.func), std::move(kernel.cpp_signature), std::move(kernel.inferred_function_schema), "registered by RegisterOperators")
);
}
@ -106,6 +107,7 @@ void RegisterOperators::registerOp_(Options&& options) {
RegisterOperators::RegisterOperators() = default;
RegisterOperators::~RegisterOperators() = default;
RegisterOperators::RegisterOperators(RegisterOperators&&) noexcept = default;
// NOLINTNEXTLINE(bugprone-exception-escape)
RegisterOperators& RegisterOperators::operator=(RegisterOperators&&) noexcept = default;

} // namespace c10
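
`bugprone-exception-escape` flags `noexcept` functions whose bodies might still throw; for the defaulted move assignment above, the analysis cannot always prove that every member-wise move is non-throwing, so the declaration-level `noexcept` looks risky to it. A generic sketch of the shape being suppressed:

```
#include <utility>
#include <vector>

struct Registry {
  std::vector<int> handles;

  Registry() = default;
  Registry(Registry&&) noexcept = default;

  // Declared noexcept, but the checker may not be able to prove the
  // member-wise moves cannot throw, hence the warning on the default.
  // NOLINTNEXTLINE(bugprone-exception-escape)
  Registry& operator=(Registry&&) noexcept = default;
};

int main() {
  Registry a;
  Registry b;
  b = std::move(a);  // if this ever threw, noexcept would mean std::terminate
  return 0;
}
```
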
@ -46,6 +46,7 @@ private:
|
||||
bool* called_;
|
||||
};
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringWithSchemaBeforeKernelInOptionsObject_thenCanBeCalled) {
|
||||
bool called = false;
|
||||
auto registrar = c10::RegisterOperators().op(c10::RegisterOperators::options().schema("_test::dummy(Tensor dummy) -> ()").catchAllKernel<MockKernel>(&called));
|
||||
@ -57,6 +58,7 @@ TEST(OperatorRegistrationTest, whenRegisteringWithSchemaBeforeKernelInOptionsObj
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringWithSchemaAfterKernelInOptionsObject_thenCanBeCalled) {
|
||||
bool called = false;
|
||||
auto registrar = c10::RegisterOperators().op(c10::RegisterOperators::options().catchAllKernel<MockKernel>(&called).schema("_test::dummy(Tensor dummy) -> ()"));
|
||||
@ -68,6 +70,7 @@ TEST(OperatorRegistrationTest, whenRegisteringWithSchemaAfterKernelInOptionsObje
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringWithNameBeforeKernelInOptionsObject_thenCanBeCalled) {
|
||||
bool called = false;
|
||||
auto registrar = c10::RegisterOperators().op(c10::RegisterOperators::options().schema("_test::dummy").catchAllKernel<MockKernel>(&called));
|
||||
@ -79,6 +82,7 @@ TEST(OperatorRegistrationTest, whenRegisteringWithNameBeforeKernelInOptionsObjec
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringWithNameAfterKernelInOptionsObject_thenCanBeCalled) {
|
||||
bool called = false;
|
||||
auto registrar = c10::RegisterOperators().op(c10::RegisterOperators::options().catchAllKernel<MockKernel>(&called).schema("_test::dummy"));
|
||||
@ -90,12 +94,14 @@ TEST(OperatorRegistrationTest, whenRegisteringWithNameAfterKernelInOptionsObject
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringWithoutSchema_thenFails) {
|
||||
expectThrows<c10::Error>([] {
|
||||
c10::RegisterOperators().op(c10::RegisterOperators::options().catchAllKernel<DummyKernel>());
|
||||
}, "In operator registration: Tried to register an operator without specifying a schema or operator name.");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenCallingOpWithWrongDispatchKey_thenFails) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options().kernel<DummyKernel>(c10::DispatchKey::CPU));
|
||||
|
||||
@ -107,6 +113,7 @@ TEST(OperatorRegistrationTest, whenCallingOpWithWrongDispatchKey_thenFails) {
|
||||
" backend.");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithCatchallKernel_whenCallingOp_thenCallsCatchallKernel) {
|
||||
bool called = false;
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options().catchAllKernel<MockKernel>(&called));
|
||||
@ -136,6 +143,7 @@ TEST(OperatorRegistrationTest, givenOpWithCatchallKernel_whenCallingOp_thenCalls
|
||||
// }, "for an operator which already has a catch-all kernel registered");
|
||||
// }
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithDispatchedKernelOutOfScope_whenRegisteringCatchallKernelAndCallingOp_thenCallsCatchallKernel) {
|
||||
bool called = false;
|
||||
{
|
||||
@ -169,6 +177,7 @@ TEST(OperatorRegistrationTest, givenOpWithDispatchedKernelOutOfScope_whenRegiste
|
||||
// }, "Tried to register a catch-all kernel for an operator which already has kernels for dispatch keys CPU. An operator can only have either a catch-all kernel or kernels with dispatch keys. The operator schema is _test::dummy");
|
||||
// }
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithCatchallKernelOutOfScope_whenRegisteringDispatchedKernelAndCallingOp_thenCallsCatchallKernel) {
|
||||
bool called = false;
|
||||
{
|
||||
@ -184,6 +193,7 @@ TEST(OperatorRegistrationTest, givenOpWithCatchallKernelOutOfScope_whenRegisteri
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithoutKernels_whenRegisteringWithSchema_thenOnlyRegistersSchema) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()");
|
||||
|
||||
@ -195,12 +205,14 @@ TEST(OperatorRegistrationTest, givenOpWithoutKernels_whenRegisteringWithSchema_t
|
||||
" backend.");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithoutKernels_whenRegisteringWithoutSchema_thenFails) {
|
||||
expectThrows<c10::Error>([&] {
|
||||
c10::RegisterOperators().op("_test::dummy");
|
||||
}, "Cannot infer operator schema in registration of operator _test::dummy because there is no kernel specified.");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithoutKernels_whenRunningOutOfScope_thenSchemaIsGone) {
|
||||
{
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()");
|
||||
@ -210,6 +222,7 @@ TEST(OperatorRegistrationTest, givenOpWithoutKernels_whenRunningOutOfScope_thenS
|
||||
EXPECT_FALSE(op.has_value());
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenOpWithoutKernelsWithoutTensorInputs_whenRegistering_thenRegisters) {
|
||||
// as long as we don't register non-catchall kernels, ops without tensor arguments are fine
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy() -> ()");
|
||||
@ -218,6 +231,7 @@ TEST(OperatorRegistrationTest, givenOpWithoutKernelsWithoutTensorInputs_whenRegi
|
||||
ASSERT_TRUE(op.has_value()); // assert schema is registered
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenMultipleKernelsWithSameDispatchKey_whenRegisteringInSameOpCall_thenFails) {
|
||||
expectThrows<c10::Error>([&] {
|
||||
auto registrar = c10::RegisterOperators()
|
||||
@ -227,6 +241,7 @@ TEST(OperatorRegistrationTest, givenMultipleKernelsWithSameDispatchKey_whenRegis
|
||||
}, "In operator registration: Tried to register multiple kernels with same dispatch key CPU for operator schema _test::dummy");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, givenMultipleCatchallKernels_whenRegisteringInSameOpCall_thenFails) {
|
||||
expectThrows<c10::Error>([&] {
|
||||
auto registrar = c10::RegisterOperators()
|
||||
@ -236,6 +251,7 @@ TEST(OperatorRegistrationTest, givenMultipleCatchallKernels_whenRegisteringInSam
|
||||
}, "Tried to register multiple catch-all kernels for operator schema _test::dummy");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringCPUTensorType_thenCanOnlyCallUnboxedWithCPUDispatchKey) {
|
||||
bool called_kernel_cpu = false;
|
||||
auto registrar= c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
@ -257,6 +273,7 @@ TEST(OperatorRegistrationTest, whenRegisteringCPUTensorType_thenCanOnlyCallUnbox
|
||||
" backend.");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsInSameOpCallAndCalling_thenCallsCorrectKernel) {
|
||||
bool called_kernel1 = false;
|
||||
bool called_kernel2 = false;
|
||||
@ -291,11 +308,13 @@ TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsInSameOpCallAndCall
|
||||
}, "CUDA");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
bool called_stackbased_kernel = false;
|
||||
void stackBasedKernel(const OperatorHandle&, c10::Stack* stack) {
|
||||
called_stackbased_kernel = true;
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsByNameAndNoneCanInferSchema_thenFails) {
|
||||
bool called_kernel = false;
|
||||
expectThrows<c10::Error>([&] {
|
||||
@ -306,6 +325,7 @@ TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsByNameAndNoneCanInf
|
||||
}, "Cannot infer operator schema for this kind of kernel in registration of operator _test::dummy");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsBySchemaAndNoneCanInferSchema_thenSucceeds) {
|
||||
bool called_kernel = false;
|
||||
auto registrar1 = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
@ -332,6 +352,7 @@ TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsBySchemaAndNoneCanI
|
||||
EXPECT_FALSE(called_kernel);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsByNameAndOnlyOneCanInferSchema_thenSucceeds) {
|
||||
bool called_kernel = false;
|
||||
auto registrar1 = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
|
||||
@ -358,6 +379,7 @@ TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsByNameAndOnlyOneCan
|
||||
EXPECT_FALSE(called_kernel);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsBySchemaAndOnlyOneCanInferSchema_thenSucceeds) {
|
||||
bool called_kernel = false;
|
||||
auto registrar1 = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
@ -388,6 +410,7 @@ struct DummyKernelWithIntParam final : OperatorKernel {
|
||||
void operator()(Tensor, int64_t) {}
|
||||
};
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringMismatchingKernelsInSameOpCall_thenFails) {
|
||||
bool called_kernel = false;
|
||||
expectThrows<c10::Error>([&] {
|
||||
@ -401,6 +424,7 @@ void backend_fallback_kernel(const c10::OperatorHandle& op, c10::Stack* stack) {
|
||||
(*stack)[1] = (*stack)[1].toString()->string() + op.schema().name();
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernel_thenCanBeCalled) {
|
||||
auto registrar = c10::Dispatcher::singleton().registerFallback(c10::DispatchKey::CPU, c10::KernelFunction::makeFromBoxedFunction<&backend_fallback_kernel>(), "");
|
||||
|
||||
@ -411,6 +435,7 @@ TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernel_thenCanBeCal
|
||||
EXPECT_EQ("hello _test::dummy", stack[1].toString()->string());
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelForWrongBackend_thenCannotBeCalled) {
|
||||
auto registrar = c10::Dispatcher::singleton().registerFallback(c10::DispatchKey::CUDA, c10::KernelFunction::makeFromBoxedFunction<&backend_fallback_kernel>(), "");
|
||||
|
||||
@ -422,8 +447,10 @@ TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelForWrongBacke
|
||||
}, "Could not run '_test::dummy' with arguments from the 'CPU' backend.");
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
bool called = false;
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndRegularKernelForDifferentBackend_thenRegularKernelCanBeCalled) {
|
||||
auto registrar = c10::Dispatcher::singleton().registerFallback(c10::DispatchKey::CPU, c10::KernelFunction::makeFromBoxedFunction<&backend_fallback_kernel>(), "");
|
||||
|
||||
@ -439,6 +466,7 @@ TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndRegularKer
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndRegularKernelForDifferentBackend_thenFallbackKernelCanBeCalled) {
|
||||
auto registrar = c10::Dispatcher::singleton().registerFallback(c10::DispatchKey::CPU, c10::KernelFunction::makeFromBoxedFunction<&backend_fallback_kernel>(), "");
|
||||
|
||||
@ -455,6 +483,7 @@ TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndRegularKer
|
||||
EXPECT_EQ("hello _test::dummy", stack[1].toString()->string());
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndRegularKernelForSameBackend_thenCallsRegularKernel) {
|
||||
auto registrar = c10::Dispatcher::singleton().registerFallback(c10::DispatchKey::CPU, c10::KernelFunction::makeFromBoxedFunction<&backend_fallback_kernel>(), "");
|
||||
|
||||
@ -470,7 +499,9 @@ TEST(OperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndRegularKer
|
||||
EXPECT_TRUE(called);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
bool called_autograd = false;
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
bool called_nonautograd = false;
|
||||
|
||||
void nonautograd_kernel(Tensor a) {
|
||||
@ -481,6 +512,7 @@ void autograd_kernel(Tensor a) {
|
||||
called_autograd = true;
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringAutogradKernel_thenCanCallAutogradKernel) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
.kernel<decltype(autograd_kernel), &autograd_kernel>(DispatchKey::Autograd));
|
||||
@ -498,6 +530,7 @@ TEST(OperatorRegistrationTest, whenRegisteringAutogradKernel_thenCanCallAutograd
|
||||
EXPECT_TRUE(called_autograd);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringAutogradKernelWithRegularKernel_thenCanCallAutogradKernel) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
.kernel<decltype(nonautograd_kernel), nonautograd_kernel>(DispatchKey::CPU)
|
||||
@ -512,6 +545,7 @@ TEST(OperatorRegistrationTest, whenRegisteringAutogradKernelWithRegularKernel_th
|
||||
EXPECT_TRUE(called_autograd);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringAutogradKernelWithCatchAllKernel_thenCanCallAutogradKernel) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
.catchAllKernel<decltype(nonautograd_kernel), nonautograd_kernel>()
|
||||
@ -527,6 +561,7 @@ TEST(OperatorRegistrationTest, whenRegisteringAutogradKernelWithCatchAllKernel_t
|
||||
EXPECT_FALSE(called_autograd);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, whenRegisteringAutogradKernelWithCatchAllKernel_thenCanCallCatchallKernel) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
.catchAllKernel<decltype(nonautograd_kernel), nonautograd_kernel>()
|
||||
@ -541,6 +576,7 @@ TEST(OperatorRegistrationTest, whenRegisteringAutogradKernelWithCatchAllKernel_t
|
||||
EXPECT_FALSE(called_autograd);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, AutogradBackendOverridesAutogradKernel) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
.kernel<decltype(nonautograd_kernel), &nonautograd_kernel>(DispatchKey::AutogradCPU)
|
||||
@ -570,6 +606,7 @@ TEST(OperatorRegistrationTest, AutogradBackendOverridesAutogradKernel) {
|
||||
EXPECT_FALSE(called_nonautograd);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
|
||||
TEST(OperatorRegistrationTest, AutogradXLAOverridesAutogradKernel) {
|
||||
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
|
||||
.kernel<decltype(nonautograd_kernel), &nonautograd_kernel>(DispatchKey::AutogradXLA)
|
||||
@ -594,6 +631,7 @@ TEST(OperatorRegistrationTest, AutogradXLAOverridesAutogradKernel) {
|
||||
EXPECT_FALSE(called_nonautograd);
|
||||
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, whenRegisterWithXLAKernelAndCatchAll_AutogradXLAIsNotFilled) {
{
auto registrar = c10::RegisterOperators().op("_test::dummy(Tensor dummy) -> ()", c10::RegisterOperators::options()
@ -635,6 +673,7 @@ TEST(OperatorRegistrationTest, whenRegisterWithXLAKernelAndCatchAll_AutogradXLAI
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenLambdaKernel_whenRegisteringWithMismatchingCppSignatures_thenFails) {
expectThrows<c10::Error>([] {
auto registrar = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
@ -643,6 +682,7 @@ TEST(OperatorRegistrationTest, givenLambdaKernel_whenRegisteringWithMismatchingC
}, "Mismatch in kernel C++ signatures");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenLambdaKernel_whenRegisteringCatchAllAndBackendWithMismatchingCppSignatures_thenFails) {
expectThrows<c10::Error>([] {
auto registrar = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
@ -651,6 +691,7 @@ TEST(OperatorRegistrationTest, givenLambdaKernel_whenRegisteringCatchAllAndBacke
}, "Mismatch in kernel C++ signatures");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenLambdaKernel_whenRegisteringBackendAndCatchAllWithMismatchingCppSignatures_thenFails) {
expectThrows<c10::Error>([] {
auto registrar = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
@ -659,6 +700,7 @@ TEST(OperatorRegistrationTest, givenLambdaKernel_whenRegisteringBackendAndCatchA
}, "Mismatch in kernel C++ signatures");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenLambdaKernel_whenAccessingWithMismatchingCppSignatures_thenFails) {
auto registrar = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
.kernel(DispatchKey::CPU, [] (int64_t) {}));
@ -668,6 +710,7 @@ TEST(OperatorRegistrationTest, givenLambdaKernel_whenAccessingWithMismatchingCpp
}, "Tried to access or call an operator with a wrong signature.\n operator: _test::dummy(int _0) -> ()");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenLambdaKernel_whenAccessingCatchAllWithMismatchingCppSignatures_thenFails) {
auto registrar = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
.catchAllKernel([] (int64_t) {}));
@ -677,6 +720,7 @@ TEST(OperatorRegistrationTest, givenLambdaKernel_whenAccessingCatchAllWithMismat
}, "Tried to access or call an operator with a wrong signature.\n operator: _test::dummy(int _0) -> ()");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenTorchLibrary_whenRegisteringWithMismatchingCppSignatures_thenFails) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("dummy(int a) -> ()");
@ -686,6 +730,7 @@ TEST(OperatorRegistrationTest, givenTorchLibrary_whenRegisteringWithMismatchingC
}, "Mismatch in kernel C++ signatures");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenTorchLibrary_whenAccessingWithMismatchingCppSignatures_thenFails) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("dummy(int a) -> ()");
@ -696,6 +741,7 @@ TEST(OperatorRegistrationTest, givenTorchLibrary_whenAccessingWithMismatchingCpp
}, "Tried to access or call an operator with a wrong signature.\n operator: _test::dummy(int a) -> ()");
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, givenTorchLibrary_whenAccessingCatchAllWithMismatchingCppSignatures_thenFails) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("dummy(int a) -> ()", [] (int64_t) {});
@ -807,12 +853,15 @@ struct testArgTypes final {
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// TODO Test Scalar

// primitive types
testArgTypes<double>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.5, [] (const double& v) {EXPECT_EQ(1.5, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
2.5, [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float a) -> float");
testArgTypes<int64_t>::test(
@ -839,7 +888,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {

// optional types (with has_value() == true)
testArgTypes<c10::optional<double>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float? a) -> float?");
testArgTypes<c10::optional<int64_t>>::test(
@ -912,7 +963,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {

// list types (with non-empty list)
testArgTypes<c10::List<double>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<double>({1.5, 2.5}), [] (const c10::List<double>& v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<double>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<c10::List<double>>());},
"(float[] a) -> float[]");
testArgTypes<c10::List<int64_t>>::test(
@ -961,7 +1014,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {

// list types (with non-empty list)
testArgTypes<c10::ArrayRef<double>, c10::List<double>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::ArrayRef<double>({1.5, 2.5}), [] (c10::ArrayRef<double> v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<double>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<c10::List<double>>());},
"(float[] a) -> float[]");
testArgTypes<c10::ArrayRef<int64_t>, c10::List<int64_t>>::test(
@ -1011,7 +1066,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {

// std::array list types (with non-empty list)
testArgTypes<std::array<double, 2>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::array<double, 2>({1.5, 2.5}), [] (std::array<double, 2> v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::array<double, 2>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<std::array<double, 2>>());},
"(float[2] a) -> float[2]");
testArgTypes<std::array<int64_t, 2>>::test(
@ -1062,7 +1119,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {

// deprecated list types (with non-empty list)
testArgTypes<std::vector<double>>::test<TestLegacyAPI>(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<double>({1.5, 2.5}), [] (const std::vector<double>& v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<double>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<c10::List<double>>());},
"(float[] a) -> float[]");
testArgTypes<std::vector<int64_t>>::test<TestLegacyAPI>(
@ -1208,6 +1267,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
"(Dict(str, Dict(int, str)?[])[] a) -> Dict(str, Dict(int, str)?[])[]");
}
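
Every testArgTypes<T>::test(...) call in this block has the same four-part shape: an input value with a check run on what the kernel receives, an output value with a check run on the IValue the caller gets back, and the operator schema string. A hedged sketch of that contract (the real helper is defined earlier in this test file and goes through the dispatcher; this skeleton only shows the shape):

```
#include <functional>
#include <string>

template <class T>
struct TestArgTypesSketch {
  static void test(T input, std::function<void(const T&)> check_input,
                   T output, std::function<void(const T&)> check_output,
                   const std::string& schema) {
    // Real helper: register "_test::my_op" with `schema`, call it with
    // `input` via the dispatcher, run check_input on the argument the kernel
    // received and check_output on the value returned to the caller (there
    // it is an IValue, not a T). Here we only exercise the callbacks.
    check_input(input);
    check_output(output);
  }
};
```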

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, testBasics) {
auto m = MAKE_TORCH_LIBRARY(_test);
m.def("dummy(Tensor self) -> Tensor");
@ -1229,6 +1289,7 @@ TEST(NewOperatorRegistrationTest, testBasics) {
ASSERT_TRUE(Dispatcher::singleton().findSchema({"_test::dummy4", ""}).has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, importTopLevel) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("def1(Tensor self) -> Tensor");
@ -1247,6 +1308,7 @@ TEST(NewOperatorRegistrationTest, importTopLevel) {
ASSERT_TRUE(Dispatcher::singleton().findOp({"test::impl1", ""}).has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, overload) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn(Tensor self) -> Tensor");
@ -1258,6 +1320,7 @@ TEST(NewOperatorRegistrationTest, overload) {
ASSERT_TRUE(Dispatcher::singleton().findSchema({"test::fn", "overload2"}).has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, importNamespace) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("def1(Tensor self) -> Tensor");
@ -1274,6 +1337,7 @@ TEST(NewOperatorRegistrationTest, importNamespace) {
ASSERT_TRUE(Dispatcher::singleton().findOp({"test::impl1", ""}).has_value());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, schema) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("def1(Tensor self) -> Tensor");
@ -1292,6 +1356,7 @@ TEST(NewOperatorRegistrationTest, schema) {
ASSERT_TRUE(Dispatcher::singleton().findSchema({"test::def4", ""})->schema().isDefaultAliasAnalysisKind());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndCatchallKernelForSameBackend_thenCallsFallbackKernel) {
auto m1 = MAKE_TORCH_LIBRARY_IMPL(_, CPU);
m1.fallback(CppFunction::makeFromBoxedFunction<&backend_fallback_kernel>());
@ -1310,6 +1375,7 @@ TEST(NewOperatorRegistrationTest, whenRegisteringBackendFallbackKernelAndCatchal
EXPECT_TRUE(called);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, whenRegisteringAutogradKernelWithRegularKernel_thenCanCallRegularKernel) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn(Tensor dummy) -> ()");
@ -1325,6 +1391,7 @@ TEST(NewOperatorRegistrationTest, whenRegisteringAutogradKernelWithRegularKernel
EXPECT_FALSE(called_autograd);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchWithCompositeImplicitAutogradKernel) {
bool math_called = false;
auto m = MAKE_TORCH_LIBRARY(test);
@ -1370,6 +1437,7 @@ TEST(NewOperatorRegistrationTest, dispatchWithCompositeImplicitAutogradKernel) {
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchWithCompositeImplicitAutogradAndAutogradKernel) {
bool math_called = false;
bool autograd_called = false;
@ -1396,6 +1464,7 @@ TEST(NewOperatorRegistrationTest, dispatchWithCompositeImplicitAutogradAndAutogr
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchWithCompositeImplicitAutogradAndCatchAllKernel) {
bool math_called = false;
bool catchall_called = false;
@ -1423,6 +1492,7 @@ TEST(NewOperatorRegistrationTest, dispatchWithCompositeImplicitAutogradAndCatchA
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, AutogradBackendOverridesCompositeImplicitAutogradKernel) {
bool math_called = false;
bool autograd_called = false;
@ -1462,6 +1532,7 @@ TEST(NewOperatorRegistrationTest, AutogradBackendOverridesCompositeImplicitAutog
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, BackendOverridesCompositeImplicitAutogradKernel) {
bool math_called = false;
bool backend_called = false;
@ -1502,6 +1573,7 @@ TEST(NewOperatorRegistrationTest, BackendOverridesCompositeImplicitAutogradKerne
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchWithCompositeExplicitAutogradKernel) {
bool called = false;
auto m = MAKE_TORCH_LIBRARY(test);
@ -1550,6 +1622,7 @@ TEST(NewOperatorRegistrationTest, dispatchWithCompositeExplicitAutogradKernel) {
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchWithCompositeExplicitAutogradAndCompositeImplicitAutogradKernel) {
bool backend_called = false;
bool math_called = false;
@ -1606,6 +1679,7 @@ TEST(NewOperatorRegistrationTest, dispatchWithCompositeExplicitAutogradAndCompos
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, BackendOverridesCompositeExplicitAutogradKernel) {
bool default_called = false;
bool backend_called = false;
@ -1648,6 +1722,7 @@ TEST(NewOperatorRegistrationTest, BackendOverridesCompositeExplicitAutogradKerne
}


// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatch) {
bool cpu_called = false;
bool cuda_called = false;
@ -1690,6 +1765,7 @@ TEST(NewOperatorRegistrationTest, dispatch) {
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchAutogradPrecedence) {
bool cpu_called = false;
auto m = MAKE_TORCH_LIBRARY(test);
@ -1731,7 +1807,9 @@ TEST(NewOperatorRegistrationTest, dispatchAutogradPrecedence) {
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, throwsWhenRegisterToBackendMapsToAutogradOther) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool sparsecpu_called, math_called = false;
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn", torch::dispatch(c10::DispatchKey::SparseCPU, [&](const Tensor& x) { sparsecpu_called = true; return x; }));
@ -1752,6 +1830,7 @@ TEST(NewOperatorRegistrationTest, throwsWhenRegisterToBackendMapsToAutogradOther
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchMultipleTensors) {
bool privateuse1_called = false;
bool catchall_called = false;
@ -1817,6 +1896,7 @@ TEST(NewOperatorRegistrationTest, dispatchMultipleTensors) {
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, dispatchMultiple) {
bool cpu_called = false;
bool cuda_called = false;
@ -1853,6 +1933,7 @@ TEST(NewOperatorRegistrationTest, dispatchMultiple) {
}
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, fallback) {
auto m = MAKE_TORCH_LIBRARY_IMPL(_, CPU);
m.fallback(CppFunction::makeFromBoxedFunction<&backend_fallback_kernel>());
@ -1865,6 +1946,7 @@ TEST(NewOperatorRegistrationTest, fallback) {
EXPECT_EQ("hello _test::dummy", stack[1].toString()->string());
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, BackendSelectRedispatchesToCPU) {
bool cpu_called = false;
bool backend_generic_called = false;
@ -1885,6 +1967,7 @@ TEST(NewOperatorRegistrationTest, BackendSelectRedispatchesToCPU) {
ASSERT_TRUE(backend_generic_called);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, TorchLibraryTwiceIsError) {
{
auto m = MAKE_TORCH_LIBRARY(test);
@ -1900,6 +1983,7 @@ Tensor dummy_fn(const Tensor& x) {
return x;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, CppFunction) {
// Just show off the possible ways to register functions
auto m = MAKE_TORCH_LIBRARY(test);
@ -1927,6 +2011,7 @@ struct OpRegistrationListenerForDelayedListenerTest : public c10::OpRegistration
}
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, testDelayedListener) {
auto listener = std::make_unique<OpRegistrationListenerForDelayedListenerTest>();
auto listener_ptr = listener.get();
@ -1946,6 +2031,7 @@ TEST(NewOperatorRegistrationTest, testDelayedListener) {
EXPECT_EQ(initial_num_deregisters + 1, listener_ptr->num_deregisters_);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NewOperatorRegistrationTest, testImplNoDefGetsCaught) {
auto danglingImpls = Dispatcher::singleton().findDanglingImpls();
std::string error_str = "Discovered operators that have been registered through the dispatcher"
@ -1962,8 +2048,11 @@ TEST(NewOperatorRegistrationTest, testImplNoDefGetsCaught) {
ASSERT_EQ(danglingImpls.size(), 0) << error_str;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called_kernel_cpu = false;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called_kernel_autograd = false;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool called_kernel_tracing = false;

void cpu_kernel(Tensor) {
@ -1994,6 +2083,7 @@ void tracing_kernel_redispatching_with_DispatchKeySet(c10::DispatchKeySet ks, Te
callOpUnboxedWithPrecomputedDispatchKeySet<void, Tensor>(*op, updatedDispatchKeySet, a);
}
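
The tests that follow rely on the DispatchKeySet calling convention: a kernel may take the current c10::DispatchKeySet as a leading argument, mask out its own key and everything above it, and re-enter the dispatcher so the next kernel in line runs (tracing, then autograd, then CPU in this file). A hedged sketch of that masking step; the function name below is illustrative:

```
void tracing_kernel_sketch(c10::DispatchKeySet ks, at::Tensor a) {
  // Keep only the keys strictly below Tracer in priority.
  auto lower = ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER,
                                        c10::DispatchKey::Tracer);
  // The tests then redispatch with the reduced set, e.g. via
  // callOpUnboxedWithPrecomputedDispatchKeySet<void, at::Tensor>(op, lower, a);
  (void)lower; (void)a;
}
```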

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, callKernelsWithDispatchKeySetConvention_call_redispatchesToLowerPriorityKernels) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn(Tensor dummy) -> ()");
@ -2017,6 +2107,7 @@ TEST(OperatorRegistrationTest, callKernelsWithDispatchKeySetConvention_call_redi
EXPECT_TRUE(called_kernel_cpu);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, callKernelsWithDispatchKeySetConvention_callBoxed_redispatchesToLowerPriorityKernels) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn(Tensor dummy) -> ()");
@ -2040,6 +2131,7 @@ TEST(OperatorRegistrationTest, callKernelsWithDispatchKeySetConvention_callBoxed
EXPECT_TRUE(called_kernel_cpu);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(OperatorRegistrationTest, callKernelsWithDispatchKeySetConvention_mixedCallingConventions_redispatchesToLowerPriorityKernels) {
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn(Tensor dummy) -> ()");

@ -32,6 +32,7 @@ std::string qual_name_for_entry(const Entry& entry) {
// I haven't implemented that because it's not clear to me how to
// dedupe the namespaces array at compile-time, particularly in C++14,
// but it would be straightforward if we switched to codegen.
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
constexpr Entry entries[] = {
#define SYMBOL_ENTRY(n, s) {#n, #s, n::s, namespaces::n},
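
The SYMBOL_ENTRY macro above is the X-macro idiom: a single list is expanded once per row to stamp out the constexpr entries[] table. A minimal self-contained illustration of the pattern (names here are invented for the example):

```
struct Row { const char* name; int value; };

#define MY_SYMBOLS(X) X(alpha, 1) X(beta, 2)
#define MAKE_ROW(n, v) {#n, v},
constexpr Row rows[] = { MY_SYMBOLS(MAKE_ROW) };  // {{"alpha", 1}, {"beta", 2}}
#undef MAKE_ROW
#undef MY_SYMBOLS
```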

@ -390,6 +390,7 @@ MatchTypeReturn matchTypeVariables(
lt_formal->getElementType(), lt_actual->getElementType(), type_env);
if (!innerMatch.success()) {
// propagate the errMsg onward
// NOLINTNEXTLINE(performance-no-automatic-move)
return innerMatch;
}
return MatchTypeReturn::Success();
@ -415,6 +416,7 @@ MatchTypeReturn matchTypeVariables(
const auto result = matchTypeVariables(
tp_formal->elements()[i], tp_actual->elements()[i], type_env);
if (!result.success()) {
// NOLINTNEXTLINE(performance-no-automatic-move)
return result;
}
}
@ -429,6 +431,7 @@ MatchTypeReturn matchTypeVariables(
const auto innerMatch = matchTypeVariables(
lt_formal->getElementType(), lt_actual->getElementType(), type_env);
if (!innerMatch.success()) {
// NOLINTNEXTLINE(performance-no-automatic-move)
return innerMatch;
}
return MatchTypeReturn::Success();
@ -442,6 +445,7 @@ MatchTypeReturn matchTypeVariables(
const auto innerMatch = matchTypeVariables(
lt_formal->getElementType(), lt_actual->getElementType(), type_env);
if (!innerMatch.success()) {
// NOLINTNEXTLINE(performance-no-automatic-move)
return innerMatch;
}
return MatchTypeReturn::Success();
@ -455,6 +459,7 @@ MatchTypeReturn matchTypeVariables(
const auto optionedMatch = matchTypeVariables(
opt_formal->getElementType(), opt_actual->getElementType(), type_env);
if (!optionedMatch.success()) {
// NOLINTNEXTLINE(performance-no-automatic-move)
return optionedMatch;
}
} else if (!actual->isSubtypeOf(NoneType::get())) {
@ -870,6 +875,7 @@ std::string TupleType::annotation_str_impl(TypePrinter printer) const {
return ss.str();
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static std::vector<bool> findContiguous(
const at::IntArrayRef& sizes,
const at::IntArrayRef& strides) {
@ -941,6 +947,7 @@ VaryingShape<Stride> TensorType::computeStrideProps(
return VaryingShape<Stride>{stride_properties};
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::atomic<size_t> ShapeSymbol::num_symbols{1};

template struct VaryingShape<c10::ShapeSymbol>;
@ -951,6 +958,7 @@ template struct VaryingShape<int64_t>;
TensorType::TensorType(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
// NOLINTNEXTLINE(modernize-pass-by-value)
const SymbolicShape& sizes,
const VaryingShape<Stride>& strides,
c10::optional<bool> requires_grad,
@ -1503,6 +1511,7 @@ bool ClassType::isSubtypeOfExt(const TypePtr& rhs, std::ostream* why_not) const
return false;
}
if (!self_method->getSchema().isSubtypeOf(
// NOLINTNEXTLINE(bugprone-argument-comment)
schema, /*is_method=*/true, why_not)) {
if (why_not) {
*why_not << "Method on class '" << repr_str()
@ -1544,6 +1553,7 @@ bool InterfaceType::isSubTypeImpl(
}
return false;
}
// NOLINTNEXTLINE(bugprone-argument-comment)
if (!self_schema->isSubtypeOf(schema, /*is_method=*/true, why_not)) {
if (why_not) {
*why_not << "Method on interface '" << lhs.repr_str()
@ -1631,6 +1641,7 @@ void ClassType::checkNotExist(const std::string& name, const std::string& what)
}

// Check no overlap with existing attributes
// NOLINTNEXTLINE(modernize-loop-convert)
for (size_t i = 0; i < attributes_.size(); ++i) {
TORCH_CHECK(
name != attributes_[i].getName(),

@ -44,6 +44,7 @@ public:
template <int64_t mask>
static Vec256<c10::complex<double>> blend(const Vec256<c10::complex<double>>& a, const Vec256<c10::complex<double>>& b) {
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
// NOLINTNEXTLINE(clang-diagnostic-warning)
switch (mask) {
case 0:
return a;

@ -50,6 +50,7 @@ public:
template <int64_t mask>
static Vec256<c10::complex<float>> blend(const Vec256<c10::complex<float>>& a, const Vec256<c10::complex<float>>& b) {
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
// NOLINTNEXTLINE(clang-diagnostic-warning)
switch (mask) {
case 0:
return a;
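
The "xy -> xxyy" comment in both blend() overloads describes the same trick: each complex element owns one bit in the 4-element mask, but the underlying vector blend works on scalar lanes, so that bit has to be duplicated to cover the element's real and imaginary lanes. The actual code enumerates the cases in a switch so the mask stays a compile-time constant; a sketch of the bit expansion it encodes:

```
// Duplicate each of the 4 complex-mask bits into a pair of scalar-lane bits.
constexpr int expand_complex_mask(int mask4) {
  int mask8 = 0;
  for (int i = 0; i < 4; ++i) {
    if (mask4 & (1 << i)) {
      mask8 |= 0b11 << (2 * i);  // one complex bit -> two scalar bits
    }
  }
  return mask8;
}
static_assert(expand_complex_mask(0b0101) == 0b00110011, "xy -> xxyy");
```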

@ -527,6 +527,7 @@ struct Vec256<c10::qint8> : public Vec256qi {

// This is needed because the compiler emits awful code for the default
// constructor for moving the enum
// NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
Vec256(const Vec256<c10::qint8>& other) : Vec256qi(other.vals) { }

void store(void* ptr, int count = size()) const {
@ -799,6 +800,7 @@ struct Vec256<c10::quint8> : public Vec256qi {
vals = _mm256_set1_epi8(uw);
}

// NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
Vec256(const Vec256<c10::quint8>& other) : Vec256qi(other.vals) { }

void store(void* ptr, int count = size()) const {

@ -4,6 +4,7 @@
namespace at {
namespace detail {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
C10_REGISTER_GUARD_IMPL(CPU, c10::impl::NoOpDeviceGuardImpl<DeviceType::CPU>);

}} // namespace at::detail

@ -24,6 +24,7 @@ namespace detail {
//
// CUDAHooks doesn't actually contain any data, so leaking it is very benign;
// you're probably losing only a word (the vptr in the allocated object.)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static CUDAHooksInterface* cuda_hooks = nullptr;

const CUDAHooksInterface& getCUDAHooks() {
@ -45,6 +46,7 @@ const CUDAHooksInterface& getCUDAHooks() {
}
} // namespace detail

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
C10_DEFINE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs)

} // namespace at
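
The comment above getCUDAHooks() justifies a deliberate leak: the hooks object is created once on first use and never destroyed, which sidesteps static-destruction-order problems at the cost of one heap object. A hedged sketch of the create-once-and-leak pattern (simplified; the real function goes through CUDAHooksRegistry and falls back to a default instance):

```
#include <mutex>

struct HooksSketch { virtual ~HooksSketch() = default; };

const HooksSketch& getHooksSketch() {
  static HooksSketch* hooks = nullptr;  // intentionally leaked
  static std::once_flag once;
  std::call_once(once, [] { hooks = new HooksSketch(); });
  return *hooks;
}
```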

@ -17,6 +17,7 @@ const HIPHooksInterface& getHIPHooks() {
hip_hooks = HIPHooksRegistry()->Create("HIPHooks", HIPHooksArgs{});
if (!hip_hooks) {
hip_hooks =
// NOLINTNEXTLINE(modernize-make-unique)
std::unique_ptr<HIPHooksInterface>(new HIPHooksInterface());
}
});
@ -24,6 +25,7 @@ const HIPHooksInterface& getHIPHooks() {
}
} // namespace detail

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
C10_DEFINE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs)

} // namespace at

@ -4,6 +4,7 @@
namespace at {
namespace detail {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
C10_REGISTER_GUARD_IMPL(Meta, c10::impl::NoOpDeviceGuardImpl<DeviceType::Meta>);

}} // namespace at::detail

@ -6,6 +6,7 @@
namespace at {
namespace metal {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::atomic<const MetalInterface*> g_metal_impl_registry;

MetalImplRegistrar::MetalImplRegistrar(MetalInterface* impl) {

@ -15,24 +15,43 @@ namespace at { namespace native {
static const double SELU_ALPHA = 1.6732632423543772848170429916717;
static const double SELU_SCALE = 1.0507009873554804934193349852946;
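
For reference, these two constants are the alpha and scale of the SELU activation, selu(x) = scale * (x for x > 0, alpha * (exp(x) - 1) otherwise), i.e. a scaled ELU; selu is implemented in this file as elu(self, SELU_ALPHA, SELU_SCALE). A scalar reference under that standard definition:

```
#include <cmath>

double selu_reference(double x) {
  const double alpha = 1.6732632423543772848170429916717;  // SELU_ALPHA
  const double scale = 1.0507009873554804934193349852946;  // SELU_SCALE
  return scale * (x > 0 ? x : alpha * (std::exp(x) - 1.0));
}
```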

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(elu_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(elu_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(softplus_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(softplus_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(log_sigmoid_cpu_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(log_sigmoid_backward_cpu_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(threshold_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hardtanh_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hardsigmoid_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hardsigmoid_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hardswish_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hardswish_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hardshrink_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(softshrink_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(shrink_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(leaky_relu_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(leaky_relu_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(silu_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(silu_backward_stub);

Tensor hardtanh(const Tensor& self, const Scalar& min, const Scalar& max) {
@ -174,6 +193,7 @@ Tensor selu(const Tensor & self) {
}

Tensor relu6(const Tensor & self) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::hardtanh(self, /*min_val=*/0, /*max_val=*/6);
}

@ -182,6 +202,7 @@ Tensor & selu_(Tensor & self) {
}

Tensor & relu6_(Tensor & self) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::hardtanh_(self, /*min_val=*/0, /*max_val=*/6);
}

@ -320,6 +341,7 @@ Tensor rrelu_with_noise_backward(
bool is_result) {
auto lower_tensor = scalar_to_tensor(lower);
auto upper_tensor = scalar_to_tensor(upper);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (training && (upper_tensor - lower_tensor).item().to<float>() > 1E-6) {
return grad_output.mul(noise);
} else {
@ -428,6 +450,7 @@ void inline prelu_cpu_kernel_share_weights(
auto input_data = input.data_ptr<scalar_t>();
auto weight_val = weight.data_ptr<scalar_t>()[0];

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, input_numel, 1000, [&](int64_t start, int64_t end) {
for (auto i = start; i < end; i++) {
scalar_t input_data_val = input_data[i];
@ -468,6 +491,7 @@ void inline prelu_cpu_kernel_multi_weights(
}
}
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.numel() > 1000) {
at::parallel_for(0, input_dim0_size, 0, loop);
} else {
@ -541,6 +565,7 @@ void inline prelu_cpu_backward_kernel_share_weights(
auto input_grad_data = input_grad.data_ptr<scalar_t>();
auto weight_grad_data = weight_grad.data_ptr<scalar_t>();

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
scalar_t sum = at::parallel_reduce(0, input_numel, 1000, scalar_t(0),
[&](int64_t start, int64_t end, scalar_t ident) -> scalar_t {
scalar_t partial_sum = ident;
@ -595,6 +620,7 @@ void inline prelu_cpu_backward_kernel_multi_weights(
}
}
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.numel() > 1000) {
at::parallel_for(0, input_dim0_size, 0, loop);
} else {
@ -745,7 +771,9 @@ Tensor infinitely_differentiable_gelu_backward(
const Tensor& grad,
const Tensor& self) {
constexpr double kAlpha = M_2_SQRTPI * M_SQRT1_2 * 0.5;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor cdf = (1.0 + (self * M_SQRT1_2).erf_()).mul_(0.5);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor pdf = (-0.5 * self * self).exp_();
return cdf.addcmul_(self, pdf, kAlpha).mul_(grad);
}
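
The tensor expressions above compute gelu'(x) for gelu(x) = x * Phi(x): the derivative is Phi(x) + x * phi(x), where Phi is the standard normal CDF and phi(x) = exp(-x^2 / 2) / sqrt(2*pi). `cdf` is Phi(x), `pdf` is the exponential without its normalization, and kAlpha = M_2_SQRTPI * M_SQRT1_2 * 0.5 = 1/sqrt(2*pi) supplies that missing factor. A scalar cross-check of the same formula:

```
#include <cmath>

double gelu_grad_reference(double x) {
  const double kAlpha = M_2_SQRTPI * M_SQRT1_2 * 0.5;       // 1/sqrt(2*pi)
  const double cdf = 0.5 * (1.0 + std::erf(x * M_SQRT1_2)); // Phi(x)
  const double pdf = std::exp(-0.5 * x * x);                // unnormalized phi
  return cdf + x * pdf * kAlpha;                            // gelu'(x)
}
```
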
@ -852,7 +880,9 @@ Tensor& log_sigmoid_backward_out_cpu(const Tensor& grad_output,
return grad_input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(GeluKernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(GeluBackwardKernel);

}} // namespace at::native

@ -28,7 +28,9 @@ namespace {
"expected dtype ", input.dtype(), " for `output` but got dtype ", output.dtype());

int64_t channels = input.size(-3);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
int64_t input_height = input.size(-2);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
int64_t input_width = input.size(-1);
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
@ -134,7 +136,9 @@ namespace {
return grad_input;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(adaptive_avg_pool2d_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(adaptive_avg_pool2d_backward_kernel);

} // at::native

@ -8,10 +8,12 @@ namespace native {
namespace {

inline int start_index(int a, int b, int c) {
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
return (int)std::floor((float)(a * c) / b);
}

inline int end_index(int a, int b, int c) {
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
return (int)std::ceil((float)((a + 1) * c) / b);
}
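
A worked example of the bin arithmetic above (a = output index, b = output size, c = input size): pooling c = 10 inputs down to b = 4 outputs, cell a covers [start_index(a), end_index(a)), giving [0,3), [2,5), [5,8), [7,10) — neighbouring windows may overlap by an element and together they cover the whole input. Integer equivalents of the float floor/ceil (valid for the non-negative operands used here) make the example compile-checkable:

```
constexpr int start_index_ref(int a, int b, int c) { return (a * c) / b; }              // floor
constexpr int end_index_ref(int a, int b, int c) { return ((a + 1) * c + b - 1) / b; }  // ceil
static_assert(start_index_ref(1, 4, 10) == 2 && end_index_ref(1, 4, 10) == 5,
              "output cell 1 of 4 pools input slots [2, 5) when the input has 10 slots");
```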

@ -137,6 +139,7 @@ void adaptive_avg_pool3d_out_cpu_template(
istrideW);
});
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
output.resize_({input.size(-5), sizeD, osizeT, osizeH, osizeW});
int64_t n = input.size(0);

@ -64,10 +64,12 @@ namespace native {
namespace {

inline int start_index(int a, int b, int c) {
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
return (int)std::floor((float)(a * c) / b);
}

inline int end_index(int a, int b, int c) {
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
return (int)std::ceil((float)((a + 1) * c) / b);
}

@ -94,6 +96,7 @@ static void adaptive_max_pool2d_single_out_frame(
for (auto d = start; d < end; d++)
{
/* loop over output */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t oh, ow;
for(oh = 0; oh < osizeH; oh++)
{
@ -188,6 +191,7 @@ static void adaptive_max_pool2d_backward_single_out_frame(
int64_t *ind_p_d = indices + d*osizeH*osizeW;

/* calculate max points */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t oh, ow;
for(oh = 0; oh < osizeH; oh++)
{
@ -316,10 +320,15 @@ TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cpu)
int dimW = 2;
int dimH = 1;
int64_t sizeB = 1;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int sizeD;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int isizeH;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int isizeW;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int osizeH;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int osizeW;

/* get contiguous gradOutput */

@ -32,13 +32,16 @@ static void avg_pool2d_out_frame(
at::parallel_for(0, nInputPlane, 0, [&](int64_t start, int64_t end) {
for (auto k = start; k < end; k++)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t p;
for(p = 0; p < nbatch; p++)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t xx, yy;
/* For all output pixels... */
scalar_t *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
const scalar_t *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
for(i = 0; i < outputWidth*outputHeight; i++)
ptr_output[i] = 0;
@ -52,6 +55,7 @@ static void avg_pool2d_out_frame(
int64_t wstart = xx * dW - padW;
int64_t hend = std::min(hstart + kH, inputHeight + padH);
int64_t wend = std::min(wstart + kW, inputWidth + padW);
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
int pool_size = (hend - hstart) * (wend - wstart);
hstart = std::max(hstart, (int64_t) 0);
wstart = std::max(wstart, (int64_t) 0);
@ -65,6 +69,7 @@ static void avg_pool2d_out_frame(

scalar_t sum = 0;

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int divide_factor;
if (divisor_override.has_value()) {
divide_factor = divisor_override.value();
@ -72,10 +77,12 @@ static void avg_pool2d_out_frame(
if(count_include_pad) {
divide_factor = pool_size;
} else {
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
divide_factor = (hend - hstart) * (wend - wstart);
}
}
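
The divisor selection in this hunk (and its twins in the backward and 3d kernels below) has three cases; pulled out as a summary helper for clarity (an assumption, not code from the patch — note pool_size is computed before the window is clamped to the real input, while the else-branch recomputes the area after clamping):

```
#include <cstdint>

int64_t choose_divisor(bool has_override, int64_t override_value,
                       bool count_include_pad, int64_t pool_size,
                       int64_t hstart, int64_t hend,
                       int64_t wstart, int64_t wend) {
  if (has_override) return override_value;   // 1. caller-supplied divisor
  if (count_include_pad) return pool_size;   // 2. padded window: padding counts as zeros
  return (hend - hstart) * (wend - wstart);  // 3. only real input elements
}
```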

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t kx, ky;

for(ky = hstart; ky < hend; ky++)
@ -193,15 +200,18 @@ static void avg_pool2d_backward_out_frame(
at::parallel_for(0, nInputPlane, 0, [&](int64_t start, int64_t end) {
for (auto k = start; k < end; k++)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t p;
for(p = 0; p < nbatch; p++)
{
const scalar_t *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t xx, yy;

scalar_t* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
scalar_t *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
for(i=0; i<inputWidth*inputHeight; i++)
ptr_gi[i] = 0.0;
@ -214,6 +224,7 @@ static void avg_pool2d_backward_out_frame(
int64_t wstart = xx * dW - padW;
int64_t hend = std::min(hstart + kH, inputHeight + padH);
int64_t wend = std::min(wstart + kW, inputWidth + padW);
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
int pool_size = (hend - hstart) * (wend - wstart);
hstart = std::max(hstart, (int64_t) 0);
wstart = std::max(wstart, (int64_t) 0);
@ -222,6 +233,7 @@ static void avg_pool2d_backward_out_frame(

scalar_t z = *ptr_gradOutput++;

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int divide_factor;
if (divisor_override.has_value()) {
divide_factor = divisor_override.value();
@ -229,10 +241,12 @@ static void avg_pool2d_backward_out_frame(
if(count_include_pad) {
divide_factor = pool_size;
} else {
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
divide_factor = (hend - hstart) * (wend - wstart);
}
}

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t kx, ky;
for(ky = hstart ; ky < hend; ky++)
{
@ -273,6 +287,7 @@ Tensor& avg_pool2d_backward_out_cpu_template(
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
const int64_t ndim = input.ndimension();

TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero");

@ -36,6 +36,7 @@ static void avg_pool3d_out_frame(
at::parallel_for(0, nslices, 0, [&](int64_t start, int64_t end) {
for (auto k = start; k < end; k++)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i, j, ti;

/* local pointers. */
@ -71,6 +72,7 @@ static void avg_pool3d_out_frame(
continue;
}

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int divide_factor;
if (divisor_override.has_value()) {
divide_factor = divisor_override.value();
@ -78,12 +80,14 @@ static void avg_pool3d_out_frame(
if(count_include_pad) {
divide_factor = pool_size;
} else {
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);
}
}

/* compute local sum: */
scalar_t sum = 0.0;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t x, y, z;

for (z = tstart; z < tend; z++)
@ -247,6 +251,7 @@ static void avg_pool3d_backward_out_frame(
at::parallel_for(0, nslices, 0, [&](int64_t start, int64_t end) {
for (auto k = start; k < end; k++)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i, j, ti;

/* local pointers */
@ -276,6 +281,7 @@ static void avg_pool3d_backward_out_frame(
hend = std::min(hend, iheight);
wend = std::min(wend, iwidth);

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int divide_factor;
if (divisor_override.has_value()) {
divide_factor = divisor_override.value();
@ -283,6 +289,7 @@ static void avg_pool3d_backward_out_frame(
if(count_include_pad) {
divide_factor = pool_size;
} else {
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);
}
}
@ -290,6 +297,7 @@ static void avg_pool3d_backward_out_frame(
/* scatter gradients out to footprint: */
scalar_t val = *op++;

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t x,y,z;
for (z = tstart; z < tend; z++)
{

@ -1059,6 +1059,7 @@ static void apply_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
auto infos_lu_data = infos_lu.data_ptr<int>();
auto infos_getri_data = infos_getri.data_ptr<int>();

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
// Run once, first to get the optimum work size
// Since we deal with batches of matrices with the same dimensions, doing this outside
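
"Run once, first to get the optimum work size" refers to LAPACK's workspace-query convention: calling a routine with lwork = -1 performs no computation and instead writes the optimal workspace size into the first element of the work argument, so the allocation can happen once for the whole batch. A hedged fragment of the idiom as apply_inverse-style code would use it (locals mirror that function; lapackGetri is the wrapper this file dispatches to):

```
int info = 0;
scalar_t wkopt{};
// lwork = -1: query only, the optimal size comes back in wkopt
lapackGetri<scalar_t>(n, self_data, lda, ipiv_data, &wkopt, /*lwork=*/-1, &info);
const int lwork = std::max<int>(1, static_cast<int>(real_impl<scalar_t, value_t>(wkopt)));
Tensor work = at::empty({lwork}, self.options());
// ...then run the real call once per batch entry with this lwork.
```
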
@ -1222,6 +1223,7 @@ static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, std::vector<i
auto n = A.size(-2);
auto nrhs = b.size(-1);

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
for (const auto i : c10::irange(batch_size)) {
scalar_t* A_working_ptr = &A_data[i * A_mat_stride];
@ -1285,6 +1287,7 @@ static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos
auto n = self.size(-2);
auto lda = std::max<int64_t>(1, n);

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
for (const auto i : c10::irange(batch_size)) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
@ -1353,6 +1356,7 @@ Tensor& linalg_cholesky_out(const Tensor &self, Tensor &result) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(cholesky_inverse_stub);

Tensor& cholesky_inverse_out_info(Tensor& result, Tensor& infos, const Tensor& input, bool upper) {
@ -1492,6 +1496,7 @@ std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cpu(const Tensor& self, bool pi

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(triangular_solve_stub);

/*
@ -1601,6 +1606,7 @@ std::tuple<Tensor&, Tensor&> triangular_solve_out(const Tensor& self, const Tens

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(geqrf_stub);

static void geqrf_out_helper(const Tensor& input, const Tensor& QR, const Tensor& tau) {
@ -1694,6 +1700,7 @@ std::tuple<Tensor, Tensor> geqrf(const Tensor& input) {
}

std::tuple<Tensor, Tensor> _linalg_qr_helper_cpu(const Tensor& self, std::string mode) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
int64_t m = self.size(-2), n = self.size(-1);
@ -1708,6 +1715,7 @@ std::tuple<Tensor, Tensor> _linalg_qr_helper_cpu(const Tensor& self, std::string

// Setup input geometry for apply_orgqr
std::vector<int64_t> q_sizes, q_strides;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);

@ -1778,6 +1786,7 @@ std::tuple<Tensor&,Tensor&> qr_out(const Tensor& self, bool some, Tensor& Q, Ten

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(orgqr_stub);

/*
@ -1918,6 +1927,7 @@ Tensor orgqr(const Tensor& input, const Tensor& tau) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(linalg_eigh_stub);

/*
@ -1949,6 +1959,7 @@ std::tuple<Tensor&, Tensor&> linalg_eigh_out_info(
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == values.device());

// eigenvalues are always real-valued
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
ScalarType real_dtype = toValueType(input.scalar_type());
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.scalar_type() == real_dtype);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.scalar_type() == vectors.scalar_type());
@ -1986,6 +1997,7 @@ std::tuple<Tensor&, Tensor&> linalg_eigh_out_info(
// linalg_eigh_stub performs calculations in-place and 'vectors' must be a copy of 'input'
vectors.copy_(input);

// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
char uplo = std::toupper(uplo_str[0]);
bool upper = (uplo == 'U');

@ -2087,6 +2099,7 @@ static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool
char uplo = upper ? 'U' : 'L';
char jobz = eigenvectors ? 'V' : 'N';

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
@ -2233,6 +2246,7 @@ static Tensor& linalg_eig_make_complex_eigenvectors(Tensor& complex_vectors, con
return complex_vectors;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(linalg_eig_stub);

std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Tensor& values, Tensor& vectors, Tensor& infos, bool compute_eigenvectors) {
@ -2309,6 +2323,7 @@ std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Tensor& va
// See: https://github.com/pytorch/pytorch/pull/52491#issuecomment-795685687
// Here we call CPU path for matrices smaller than 2048x2048
// that should be in general significantly faster than calling MAGMA
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.size(-1) <= 2048) {
linalg_eig_stub(at::kCPU, real_imag_values, maybe_complex_vectors, infos, input.to(kCPU), compute_eigenvectors);
} else {
@ -2512,6 +2527,7 @@ Tensor linalg_eigvals(const Tensor& input) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(eig_stub);

std::tuple<Tensor&, Tensor&> eig_out(const Tensor& self, bool eigenvectors, Tensor& e, Tensor& v) {
@ -2576,12 +2592,14 @@ static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
auto m = self.size(-2);
auto n = self.size(-1);
auto lda = std::max<int64_t>(1, m);
auto ldvt = std::max<int64_t>(1, n);
auto mn = std::min(m, n);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor iwork = at::empty({8 * mn}, at::kInt);
auto iwork_data = iwork.data_ptr<int>();
Tensor rwork;
@ -2747,6 +2765,7 @@ Tensor linalg_svdvals(const Tensor& input) {
" dimensions instead");
Tensor singular_values;
std::tie(std::ignore, singular_values, std::ignore) =
// NOLINTNEXTLINE(bugprone-argument-comment)
at::_svd_helper(input, /*full_matrices=*/false, /*compute_uv=*/false);
return singular_values;
}
@ -2761,6 +2780,7 @@ Tensor& linalg_svdvals_out(const Tensor& input, Tensor& result) {

Tensor singular_values_tmp;
std::tie(std::ignore, singular_values_tmp, std::ignore) =
// NOLINTNEXTLINE(bugprone-argument-comment)
at::_svd_helper(input, /*full_matrices=*/false, /*compute_uv=*/false);

at::native::resize_output(result, singular_values_tmp.sizes());
@ -2821,6 +2841,7 @@ struct LapackLstsqHelper {
int iwork_opt; // used to decide the opt `iwork` size with lwork=-1
int* iwork_ptr = &iwork_opt;

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
LapackLstsqHelper(LapackLstsqDriverType driver_type, func_t driver)
: driver_type{driver_type}, driver{driver}
{}
@ -2877,12 +2898,14 @@ struct LapackLstsqHelper {
return *this;
}

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t rwork_len;
switch (this->driver_type) {
case LapackLstsqDriverType::Gelsy:
rwork_len = std::max<int64_t>(1, 2 * n);
break;
case LapackLstsqDriverType::Gelss:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
rwork_len = std::max<int64_t>(1, 5 * std::min(m, n));
break;
// case LapackLstsqDriverType::Gelsd:
@ -3440,6 +3463,7 @@ static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, st
auto n = lu.size(-2);
auto nrhs = b.size(-1);

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
for (const auto i : c10::irange(batch_size)) {
scalar_t* b_working_ptr = &b_data[i * b_stride];

@ -37,6 +37,7 @@ void apply_reflect_conj_tri_single(scalar_t* self, int64_t n, int64_t stride, bo
};
}
// For small matrices OpenMP overhead is too large
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (n < 256) {
loop(0, n);
} else {
@ -101,6 +102,7 @@ void apply_eig(const Tensor& self, bool eigenvectors, Tensor& vals_, Tensor& vec
scalar_t* wr = vals_data;

scalar_t* vecs_data = eigenvectors ? vecs_.data_ptr<scalar_t>() : nullptr;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
int ldvr = eigenvectors ? n : 1;

Tensor rwork;
@ -114,6 +116,7 @@ void apply_eig(const Tensor& self, bool eigenvectors, Tensor& vals_, Tensor& vec
if (n > 0) {
// call lapackEig once to get the optimal size for work data
scalar_t wkopt;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
lapackEig<scalar_t, value_t>('N', jobvr, n, self_data, n, wr,
nullptr, 1, vecs_data, ldvr, &wkopt, -1, rwork_data, &info);
@ -155,10 +158,12 @@ std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvector
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cpu", [&]{
apply_eig<scalar_t>(self_, eigenvectors, vals_, vecs_, &info);
});
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
singleCheckErrors(info, "eig_cpu");
return std::tuple<Tensor, Tensor>(vals_, vecs_);
}
@ -277,9 +282,11 @@ void apply_lapack_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool uppe
int liwork = -1;
scalar_t lwork_query;
value_t rwork_query;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int iwork_query;

// call lapackSyevd once to get the optimal size for work data
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
scalar_t work_query;
lapackSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
&lwork_query, lwork, &rwork_query, lrwork, &iwork_query, liwork, infos_data);
@ -366,6 +373,7 @@ static void apply_geqrf(const Tensor& input, const Tensor& tau, int64_t m, int64
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
@ -436,6 +444,7 @@ inline void apply_orgqr(Tensor& self, const Tensor& tau, int64_t n_columns) {
auto m = self.size(-2);
auto k = tau.size(-1);
auto lda = std::max<int64_t>(1, m);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int info;

// LAPACK's requirement
@ -533,38 +542,59 @@ void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bo

} // anonymous namespace

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(cholesky_inverse_stub, DEFAULT, &cholesky_inverse_kernel_impl);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
REGISTER_VSX_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(eig_stub, DEFAULT, &eig_kernel_impl);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(eig_stub, &eig_kernel_impl);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(eig_stub, &eig_kernel_impl);
REGISTER_VSX_DISPATCH(eig_stub, &eig_kernel_impl);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(linalg_eig_stub, DEFAULT, &linalg_eig_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
REGISTER_VSX_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(linalg_eigh_stub, DEFAULT, &linalg_eigh_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
REGISTER_VSX_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(geqrf_stub, DEFAULT, &geqrf_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(geqrf_stub, &geqrf_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(geqrf_stub, &geqrf_kernel);
REGISTER_VSX_DISPATCH(geqrf_stub, &geqrf_kernel);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(orgqr_stub, DEFAULT, &orgqr_kernel_impl);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
REGISTER_VSX_DISPATCH(orgqr_stub, &orgqr_kernel_impl);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_ARCH_DISPATCH(triangular_solve_stub, DEFAULT, &triangular_solve_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_AVX2_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
|
||||
REGISTER_VSX_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
|
||||
|
||||
|
@@ -122,6 +122,7 @@ Tensor _remove_batch_dim(const Tensor& self, int64_t level, int64_t batch_size,
TORCH_INTERNAL_ASSERT(batched != nullptr);

Tensor self_without_bdim;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t newly_exposed_logical_dim;
std::tie(self_without_bdim, newly_exposed_logical_dim) = remove_existing_batch_dim(batched, level);
return movedim(self_without_bdim, newly_exposed_logical_dim, out_dim);

@@ -41,6 +41,7 @@ TORCH_META_FUNC2(div, Tensor) (const Tensor& self, const Tensor& other) {
TORCH_META_FUNC2(div, Tensor_mode) (const Tensor& self, const Tensor& other, c10::optional<std::string> rounding_mode) {
if (!rounding_mode.has_value()) {
build_binary_float_op(maybe_get_output(), self, other);
// NOLINTNEXTLINE(bugprone-branch-clone)
} else if (*rounding_mode == "trunc") {
build_binary_op(maybe_get_output(), self, other);
} else if (*rounding_mode == "floor") {

@@ -67,47 +68,89 @@ TORCH_META_FUNC(atan2) (const Tensor& self, const Tensor& other) {

namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(add_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(add_clamp_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(sub_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(mul_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(div_true_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(div_floor_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(div_trunc_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(remainder_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(atan2_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(bitwise_and_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(bitwise_or_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(bitwise_xor_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(lshift_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(rshift_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(logical_and_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(logical_or_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(logical_xor_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(lt_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(le_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(gt_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(ge_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(eq_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(ne_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(sigmoid_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(logit_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(tanh_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(maximum_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(minimum_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(fmax_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(fmin_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(fmod_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(logaddexp_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(logaddexp2_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(gcd_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(lcm_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(hypot_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(igamma_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(igammac_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(nextafter_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(heaviside_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(copysign_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(xlogy_stub);

TORCH_IMPL_FUNC(add_out) (

@@ -1126,10 +1169,12 @@ Tensor& heaviside_(Tensor& self, const Tensor& values) {
}

Tensor& ldexp_out(const Tensor& self, const Tensor& other, Tensor& result) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::mul_out(result, self, at::pow(2.0, other));
}

Tensor ldexp(const Tensor& self, const Tensor& other) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::mul(self, at::pow(2.0, other));
}

@@ -50,6 +50,7 @@ TORCH_IMPL_FUNC(addmv_out_cpu)(const Tensor &self, const Tensor &mat, const Tens
result.zero_();
} else {
at::cpu::mul_out(
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<Tensor&>(result),
self,
at::native::scalar_tensor(

@@ -57,6 +58,7 @@ TORCH_IMPL_FUNC(addmv_out_cpu)(const Tensor &self, const Tensor &mat, const Tens
}
} else {
if (!result.is_same(*self_) && betaval != 0.0) { //if beta is 0, result contents is ignored
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
at::native::copy_(const_cast<Tensor&>(result), *self_);
}
if (result.numel() != 0) {

@@ -217,6 +217,7 @@ AT_FORALL_COMPLEX_TYPES(INSTANTIATE);
namespace blas_impl {
#if AT_BUILD_WITH_BLAS()
float dot_fast_path(int n, float* x, int incx, float* y, int incy) {
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
return sdot_(&n, x, &incx, y, &incy);
}

@@ -257,6 +258,7 @@ scalar_t dot_naive(
scalar_t* y,
int64_t incy,
Functor op) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
scalar_t sum = 0;
for (i = 0; i < n; i++) {

@@ -97,6 +97,7 @@ fbgemm::matrix_op_t to_fbgemm(TransposeType trans) {

} // namespace (anonymous)

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(gemm_stub);

void gemm(

@@ -262,6 +263,7 @@ void gemm(
transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(axpy_stub);

void axpy(int64_t n, double a, const double *x, int64_t incx, double *y, int64_t incy) {

@@ -348,6 +350,7 @@ void axpy(int64_t n, c10::complex<float> a, const c10::complex<float> *x, int64_
n, a, x, incx, y, incy);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(copy_stub);

void copy(int64_t n, const double *x, int64_t incx, double *y, int64_t incy) {

@@ -23,8 +23,10 @@ constexpr int MIOPEN_DIM_MAX = 5;

namespace at { namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(convolution_depthwise3x3_winograd_stub);

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct ConvParams {
std::vector<int64_t> stride;
std::vector<int64_t> padding;

@@ -74,6 +76,7 @@ std::ostream& operator<<(std::ostream & out, const ConvParams& params) {

auto ConvParams::is_strided() const -> bool {
bool is_strided = false;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
for (int s : stride) {
is_strided |= (s != 1);
}

@@ -82,6 +85,7 @@ auto ConvParams::is_strided() const -> bool {

auto ConvParams::is_dilated() const -> bool {
bool is_dilated = false;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
for (int d : dilation) {
is_dilated |= (d != 1);
}

@@ -90,6 +94,7 @@ auto ConvParams::is_dilated() const -> bool {

auto ConvParams::is_padded() const -> bool {
bool is_padded = false;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
for (int p : padding) {
is_padded |= (p != 0);
}

@@ -98,6 +103,7 @@ auto ConvParams::is_padded() const -> bool {

auto ConvParams::is_output_padding_neg() const -> bool {
bool is_non_neg = false;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
for (int p : output_padding) {
is_non_neg |= (p < 0);
}

@@ -114,6 +120,7 @@ auto ConvParams::is_output_padding_big() const -> bool {

auto ConvParams::is_padding_neg() const -> bool {
bool is_non_neg = false;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
for (int p : padding) {
is_non_neg |= (p < 0);
}

@@ -122,6 +129,7 @@ auto ConvParams::is_padding_neg() const -> bool {

auto ConvParams::is_stride_nonpos() const -> bool {
bool is_nonpos = false;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
for (int s : stride) {
is_nonpos |= (s <= 0);
}

@@ -246,11 +254,13 @@ auto ConvParams::use_mkldnn(const at::Tensor& input, const at::Tensor& weight) c
!transposed && // or transposed tensors
// For 1x1 filters, MKLDNN is faster than THNN when multi-threaded,
// but THNN is faster when single-threaded.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(is_strided() || is_dilated() || input.size(0) >= 16 ||
weight.size(-1) != 1 || weight.size(-2) != 1 || at::get_num_threads() > 1) &&
(groups > 1
|| (weight.size(-1) > 3 && weight.size(-2) > 3)
|| input.size(0) > 1
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|| input.size(0)*input.size(1)*input.size(2)*input.size(3) > 20480) // for some case, native is faster
);

@@ -267,8 +277,10 @@ auto ConvParams::use_nnpack(const at::Tensor& input, const at::Tensor& weight) c
!transposed && // or transposed tensors
input.ndimension() == 4 && // must be in NCHW format
weight.ndimension() == 4 &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(weight.size(2) < 17) && (weight.size(3) < 17) // NNPACK only supports kernels up to 16x16
#if !defined(C10_MOBILE)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
&& input.size(0) >= 16 // ensure large enough batch size to ensure perf, tuneable
#endif
;

@@ -304,6 +316,7 @@ auto ConvParams::is_depthwise(
const at::Tensor& input, const at::Tensor& weight) const -> bool {
return input.is_cuda() &&
!transposed &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(input.ndimension() == 4 || input.ndimension() == 5) &&
input.size(1) == groups &&
groups > 1 && // no point if there is only a single group

@@ -316,102 +329,145 @@ bool check_cudnn_depthwise_workload(const at::Tensor& input, int stride) {
int ch = input.size(1);
int bs = input.size(0);
if (stride==1) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 7) {
// All batch sizes and nb_channels
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 112) {
return true;
}

// large nb_channels
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ch >= 1024) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if (w >= 56) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 32) {
return true;
}
}

// batch_size specific
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bs >= 128) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if (ch >= 512) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (ch >= 64) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 14) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >=28)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 64) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 256) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >= 28)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 32) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 256) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 128) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >= 56)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 16) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 1024) && (w >= 14)) {
return true;
}
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 256) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >= 56)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 8) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 64) && (w >= 56)) {
return true;
}
}
}
} else if (stride==2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ch < 256) {
return false;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 7) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bs >= 128) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if (ch >= 1024) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 512) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 28) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 64) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 28) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 32) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 1024) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 28) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 16) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 56) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 8) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 1024) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 56) {
return true;
}
} else if (bs >= 1) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >=112)) {
return true;
}

@@ -428,6 +484,7 @@ auto ConvParams::use_cudnn_depthwise(
}
if (detail::getCUDAHooks().supportsDepthwiseConvolutionWithCuDNN()) {
long cudnn_version = detail::getCUDAHooks().versionCuDNN();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
bool kernel_cond = (cudnn_version >= 7600 &&
use_cudnn(input, weight) &&
input.scalar_type() == kHalf && // only for FP16

@@ -435,10 +492,12 @@ auto ConvParams::use_cudnn_depthwise(
is_depthwise(input, weight) &&
input.ndimension() == 4 && // TODO: 5-D contiguous depthwise is not supported yet, need benchmarks
weight.size(2) == weight.size(3) && // only square kernels
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
input.size(2) >= 7 && // min width/height 7
!is_dilated() && // no dilation supported
stride[0] == stride[1] && // equal strides
((weight.size(3) == 3) || (weight.size(3) == 1)) &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
input.size(1) >= 32); // min 32 channels supported)
if (kernel_cond) {
return check_cudnn_depthwise_workload(input, stride[0]);

@@ -604,10 +663,13 @@ static Tensor convolution_same(
k, "-dimensional weight", weight_sizes, ", but got ",
input.dim(), "-dimensional input of size ",
input.sizes(), " instead");
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
TORCH_CHECK(stride.size() == dim || stride.size() == 1,
"stride cannot broadcast to ", dim, " dimensions");
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
TORCH_CHECK(dilation.size() == dim || dilation.size() == 1,
"dilation cannot broadcast to ", dim, " dimensions");
// NOLINTNEXTLINE(modernize-loop-convert,clang-diagnostic-sign-compare)
for (int64_t i = 0; i < stride.size(); ++i) {
TORCH_CHECK(stride[i] == 1, "padding='same' is not supported for strided convolutions");
}

@@ -666,6 +728,7 @@ Tensor _convolution_mode(
return at::native::convolution_same(
input, weight, bias, stride, dilation, groups);
} else if (padding == "valid") {
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
const int64_t padding_[] = {0};
return at::native::convolution(
input, weight, bias, stride, padding_, dilation, false, padding_, groups);

@@ -752,6 +815,7 @@ at::Tensor convolution_overrideable(
bool transposed, IntArrayRef output_padding, int64_t groups) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
const Tensor& bias = *bias_maybe_owned;

TORCH_CHECK_NOT_IMPLEMENTED(false, "convolution_overrideable not implemented. You are likely triggering this with tensor backend other than CPU/CUDA/MKLDNN, if this is intended, please use TORCH_LIBRARY_IMPL to override this function ");

@@ -831,6 +895,7 @@ at::Tensor _convolution(

at::MemoryFormat cudnn_memory_format = at::MemoryFormat::Contiguous;
if (cudnn_conv_use_channels_last(input, weight)) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
cudnn_memory_format = (k == 5) ? at::MemoryFormat::ChannelsLast3d : at::MemoryFormat::ChannelsLast;
}

@@ -943,6 +1008,7 @@ at::Tensor _convolution(
params.padding,
params.groups);
} else if (
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
!params.transposed && (input.ndimension() == 5) &&
(input.device().is_cpu()) &&
!params.is_dilated()) {

@@ -1026,6 +1092,7 @@ at::Tensor _convolution_nogroup(
return at::slow_conv_transpose2d(
input, weight, kernel_size, bias,
stride, padding, output_padding, dilation);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5) {
return at::slow_conv_transpose3d(
input, weight, kernel_size, bias,

@@ -1051,10 +1118,12 @@ at::Tensor _convolution_nogroup(
stride, padding);
}
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5 && (input.is_cuda() || dilated)) {
return at::slow_conv_dilated3d(
input, weight, kernel_size, bias,
stride, padding, dilation);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5) { /* dim == 5, CPU, non-dilated */
/* CPU implementation has specialized MM kernels
for non-dilated case here */

@@ -1113,6 +1182,7 @@ std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward( const c10::option
// TODO: hacky way of inferring the groups number for grouped Conv3D
// See: https://github.com/pytorch/pytorch/pull/36355
if (!params.transposed && input.dim() > 4) {
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
params.groups = input.size(1) / weight.size(1);
} else {
params.groups = groups_;

@@ -419,6 +419,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> slow_conv2d_forward_out_cpu(const Tensor&
false);

const Tensor input = self.contiguous();
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
const int64_t ndim = input.dim();
const int64_t dim_planes = 1;
const int64_t dim_height = 2;

@@ -522,6 +523,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> slow_conv2d_backward_out_cpu(const Tensor&
self,
weight,
finput,
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<Tensor&>(fgrad_input), // cast away auto-generated const of buffer
kernel_size,
stride,

@@ -67,6 +67,7 @@ static inline void slow_conv3d_shape_check(
const int64_t dim_width = 4;

// Allow for empty batch size but not other dimensions
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
bool valid_empty = ndim == 5 && input.size(dim_batch) == 0 &&
input.size(dim_planes) != 0 && input.size(dim_depth) != 0 &&
input.size(dim_height) != 0 && input.size(dim_width) != 0;

@@ -155,6 +156,7 @@ static inline void slow_conv3d_shape_check(

static Tensor view_weight_2d(const Tensor& weight_) {
Tensor weight = weight_.contiguous();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (weight.dim() == 5) {
const int64_t s1 = weight.size(0);
const int64_t s2 =

@@ -564,6 +566,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> slow_conv3d_forward_out_cpu(const Tensor&
const Tensor input = self.contiguous();
const Tensor weight_2d = view_weight_2d(weight);

// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
const int64_t ndim = input.dim();
const int64_t dim_planes = 1;
const int64_t dim_depth = 2;

@@ -678,6 +681,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> slow_conv3d_backward_out_cpu(const Tensor&
self,
weight,
finput,
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<Tensor&>(
fgrad_input), // cast away auto-generated const of buffer
kernel_size,

@@ -42,6 +42,7 @@ Tensor conv_tbc(const Tensor& self, const Tensor& weight, const Tensor& bias, in
for (int k = 0; k < kw; k++) {
int iShift = std::max(0, static_cast<int>(k - real_pad));
int oShift = std::max(0, static_cast<int>(real_pad - k));
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
int t = std::min(ilen + real_pad - k, olen) - oShift;
// Note: gemm assumes column-major matrices
// input is l*m (row-major)

@@ -67,12 +68,14 @@ std::tuple<Tensor, Tensor, Tensor> conv_tbc_backward(const Tensor& dOutput, cons
auto outputPlanes = weight_size[2];
auto kw = weight.sizes()[0];
auto olen = input_size[0] - kw + 1 + pad * 2;
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
int real_pad = (olen - ilen + kw - 1) / 2;

Tensor dInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
for (int k = 0; k < kw; k++) {
int iShift = std::max(0, k - real_pad);
int oShift = std::max(0, real_pad - k);
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
int t = std::min(ilen + real_pad - k, olen) - oShift;
// dOutput * T(weight) -> dInput
if (t > 0) {

@@ -86,6 +89,7 @@ std::tuple<Tensor, Tensor, Tensor> conv_tbc_backward(const Tensor& dOutput, cons
for (int k = 0; k < kw; k++) {
int iShift = std::max(0, k - real_pad);
int oShift = std::max(0, real_pad - k);
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
int t = std::min(ilen + real_pad - k, olen) - oShift;
// T(input) * dOutput -> dWeight
if (t > 0) {

@@ -34,10 +34,13 @@ bool copy_transpose_valid(const Tensor& self, const Tensor& src) {
// special case copy where tensor is contiguous and src is a transposed matrix
// This can be generalized to most copies, but it's trickier
void copy_same_type_transpose_(Tensor& self, const Tensor& src) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t BLOCK_SZ;
if (self.scalar_type() == kByte) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
BLOCK_SZ = 120;
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
BLOCK_SZ = 60;
}
Tensor buf = empty({BLOCK_SZ, BLOCK_SZ}, self.options());

@@ -250,6 +253,7 @@ Tensor& copy_(Tensor& self, const Tensor& src, bool non_blocking) {
return self;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(copy_stub);

} // namespace native

@@ -6,6 +6,7 @@

namespace at { namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(cross_stub);

Tensor cross(const Tensor & input, const Tensor & other, const c10::optional<int64_t> dimension) {

@@ -136,7 +136,9 @@ Tensor& max_pool2d_with_indices_backward_out_cpu_template(
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
const int64_t outputHeight = gradOutput.size(-2);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
const int64_t outputWidth = gradOutput.size(-1);

/* XXX preserve the existing shape check behavior */

@@ -259,7 +261,9 @@ Tensor max_pool2d_with_indices_backward_cpu(
return gradInput;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(max_pool2d_kernel);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(max_pool2d_backward_kernel);

} // at::native

@@ -40,6 +40,7 @@ static void max_pool3d_with_indices_single_out_frame(
for (auto k = start; k < end; k++)
{
/* loop over output */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i, j, ti;
scalar_t *ip = input_p + k * itime * iwidth * iheight;
for (ti = 0; ti < otime; ti++)

@@ -290,6 +291,7 @@ static void max_pool3d_with_indices_backward_single_out_frame(
int64_t *indz_p_k = indz_p + k * otime * owidth * oheight;

/* calculate max points */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t ti, i, j;
for (ti = 0; ti < otime; ti++)
{

@@ -8,9 +8,13 @@

namespace at { namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(pdist_forward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(pdist_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(cdist_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(cdist_backward_stub);

Tensor pairwise_distance(const Tensor& x1, const Tensor& x2, double p, double eps, bool keepdim) {

@@ -64,6 +68,7 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10

// See Note [cdist relies on cdist_impl redispatching]
// Keep this condition in sync with the condition at the Note
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (!(p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25))))) {
TORCH_CHECK(device1 == kCPU || device1 == kCUDA, "cdist only supports CPU and CUDA devices, X1 got: ", device1);
TORCH_CHECK(device2 == kCPU || device2 == kCUDA, "cdist only supports CPU and CUDA devices, X2 got: ", device2);

@@ -97,6 +102,7 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10
result = at::empty(output_shape, x1.options());
} else if (c1 == 0) {
result = at::zeros(output_shape, x1.options());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25)))) {
// See Note [cdist relies on cdist_impl redispatching]
// Keep the condition above in sync with the condition at the Note

@@ -128,6 +134,7 @@ Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, c10::optional<i
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is for pytorch to figure the backward pass itself
// when p=2. Keep this condition in sync with the See Note reference
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25)))) {
return cdist_impl(x1, x2, p, compute_mode);
} else {

@@ -38,6 +38,7 @@ int64_t update_from(int64_t from) {
int64_t from_ = std::abs(from + 1);
int n = 0;
while (from_ >>= 1) ++n;
// NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
}
return from;

@@ -54,6 +55,7 @@ int64_t update_to(int64_t to) {
int64_t to_ = std::abs(to - 1);
int n = 0;
while (to_ >>= 1) ++n;
// NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
}
return to;

@@ -21,7 +21,9 @@

#include <type_traits>
#include <functional>
// NOLINTNEXTLINE(modernize-deprecated-headers)
#include <assert.h>
// NOLINTNEXTLINE(modernize-deprecated-headers)
#include <float.h>

namespace {

@@ -60,26 +62,39 @@ namespace {
int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
TORCH_CHECK(lambda >= 0, "invalid Poisson rate, expected rate to be non-negative");
at::uniform_real_distribution<double> standard_uniform(0.0, 1.0);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (lambda >= 10) {
// transformed rejection method, (Hoermann, 1993)
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t k;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double U, V, a, b, invalpha, vr, us;

double slam = std::sqrt(lambda);
double loglam = std::log(lambda);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
b = 0.931 + 2.53 * slam;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a = -0.059 + 0.02483 * b;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
invalpha = 1.1239 + 1.1328 / (b - 3.4);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
vr = 0.9277 - 3.6224 / (b - 2);

// NOLINTNEXTLINE(modernize-use-bool-literals)
while (1) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
U = standard_uniform(generator) - 0.5;
V = standard_uniform(generator);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
us = 0.5 - std::fabs(U);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
k = (int64_t)std::floor((2 * a / us + b) * U + lambda + 0.43);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((us >= 0.07) && (V <= vr)) {
return k;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((k < 0) || ((us < 0.013) && (V > us))) {
continue;
}

@@ -91,12 +106,15 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
} else if (lambda == 0) {
return 0;
} else {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t X;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double prod, U, enlam;

enlam = std::exp(-lambda);
X = 0;
prod = 1.0;
// NOLINTNEXTLINE(modernize-use-bool-literals)
while (1) {
U = standard_uniform(generator);
prod *= U;

@@ -114,17 +132,29 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
namespace at {
namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(bernoulli_tensor_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(bernoulli_scalar_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(cauchy_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(exponential_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(multinomial_with_replacement_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(geometric_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(log_normal_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(uniform_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(normal_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(random_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(random_from_to_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(random_full_64_bits_range_stub);

// ==================================================== Bernoulli =====================================================

@@ -543,6 +573,7 @@ Tensor& multinomial_out(const Tensor& self,
TORCH_CHECK(
is_valid.to<bool>(),
"probability tensor contains either `inf`, `nan` or element < 0");
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool zero_prob_condition;
if (self.dim() == 1){
zero_prob_condition = (self.sum() == 0).item().to<bool>();

@@ -96,6 +96,7 @@ Tensor embedding_dense_backward_cpu(
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cpu", [&] () {
auto indices_data = indices_contig.data_ptr<index_t>();

// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
std::unique_ptr<index_t[]> counts;
if (scale_grad_by_freq) {
counts.reset(new index_t[num_weights]);

@@ -114,6 +115,7 @@ Tensor embedding_dense_backward_cpu(
if (k >= start && k < end) {
double scale = 1.0;
if (scale_grad_by_freq) {
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scale /= counts[k];
}
grad_weight[k].add_(grad[i], scale);

@@ -156,6 +158,7 @@ Tensor & embedding_renorm_cpu_(
auto row = self[sorted_indices[i]];
auto norm = row.norm(norm_type).item<double>();
if (norm > max_norm) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto scale = max_norm / (norm + 1e-7);
row *= scale;
}

@@ -83,6 +83,7 @@ index_select_add(const Tensor &select_indices,
auto* select_indices_data = select_indices.data_ptr<index_t>();
auto* src_data = src.data_ptr<data_t>();
auto* output_data = output.data_ptr<data_t>();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();

@@ -103,6 +104,7 @@ index_select_add(const Tensor &select_indices,
output_data + output_stride0 * add_indices_data[i], output_stride1);
} else if (bag_size.defined()) {
// Decrement bag_size to reflect that the index is padded
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}

@@ -148,6 +150,7 @@ index_select_add(const Tensor &select_indices,
/* block_size */ddim,
/* has_weight */false,
/* normalize_by_lengths */false,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/* prefetch */16,
/* is_weight_positional */false,
/* use_offsets */true

@@ -184,6 +187,7 @@ index_select_add(const Tensor &select_indices,
AT_ASSERT(select_indices.numel() == add_indices.numel());
auto* src_data = src.data_ptr<float>();
auto* add_indices_data = add_indices.data_ptr<index_t>();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();

@@ -206,6 +210,7 @@ index_select_add(const Tensor &select_indices,
output_stride1);
} else if (bag_size.defined()) {
// Decrement bag_size to reflect that the index is padded
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}

@@ -232,6 +237,7 @@ index_select_scale_add(const Tensor &select_indices,
auto* select_indices_data = select_indices.data_ptr<index_t>();
auto* src_data = src.data_ptr<data_t>();
auto* output_data = output.data_ptr<data_t>();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();

@@ -258,6 +264,7 @@ index_select_scale_add(const Tensor &select_indices,
}
} else if (bag_size.defined()) {
// Decrement bag_size to reflect that the index is padded
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}

@@ -305,6 +312,7 @@ index_select_scale_add(const Tensor &select_indices,
/* block_size */ddim,
/* has_weight */true,
/* normalize_by_lengths */false,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/* prefetch */16,
/* is_weight_positional */false,
/* use_offsets */true

@@ -341,6 +349,7 @@ index_select_scale_add(const Tensor &select_indices,
AT_ASSERT(select_indices.numel() == add_indices.numel());
auto* src_data = src.data_ptr<float>();
auto* add_indices_data = add_indices.data_ptr<index_t>();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
index_t* bag_size_data;
if (bag_size.defined()) {
bag_size_data = bag_size.data_ptr<index_t>();

@@ -365,6 +374,7 @@ index_select_scale_add(const Tensor &select_indices,
}
} else if (bag_size.defined()) {
// Decrement bag_size to reflect that the index is padded
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
bag_size_data[add_indices_data[i]]--;
}
}

@@ -884,6 +894,7 @@ void _embedding_bag_dense_backward_cpu_sum_mean(
Tensor& index_grad_weight,
int64_t padding_idx) {

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
Tensor &offset2bag_ = const_cast<Tensor &>(offset2bag__);

auto ind_sort_ = indices_.sort();

@@ -892,6 +903,7 @@ void _embedding_bag_dense_backward_cpu_sum_mean(
auto offset2bag = offset2bag_.index_select(0, ind_sort);

optional<Tensor> per_sample_weights;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
scalar_t* per_sample_weights_data;
optional<int64_t> per_sample_weights_stride;
if (per_sample_weights_.defined()) {

@@ -1109,6 +1121,7 @@ Tensor _embedding_bag_sparse_backward(
// Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml
// for more details.

// NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
Tensor grad = grad_;
Tensor index_grad = grad_.index_select(0, offset2bag);

@@ -50,6 +50,7 @@ Tensor& fill_meta_(Tensor& self, const Tensor& value) {
return self;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(fill_stub);

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ fill_diagonal ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -112,6 +112,7 @@ static void fractional_max_pool2d_out_single_batch_frame(
randomSamplesForPlane[1], inputH, outputH, poolSizeH);

/* loop over output */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int h, w;

scalar_t* inputForPlane = input + plane * inputW * inputH;

@@ -253,6 +254,7 @@ static void fractional_max_pool2d_backward_out_single_batch_frame(
scalar_t* gradOutputForPlane = gradOutput + plane * outputW * outputH;
int64_t* indicesForPlane = indices + plane * outputW * outputH;

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int h, w;
for (h = 0; h < outputH; ++h) {
for (w = 0; w < outputW; ++w) {

@@ -58,6 +58,7 @@ static void fractional_max_pool3d_out_single_batch_frame(
randomSamplesForPlane[2], inputW, outputW, poolSizeW);

/* loop over output */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t t, h, w;

scalar_t* inputForPlane = input + plane * inputT * inputH * inputW;

@@ -171,6 +172,7 @@ void fractional_max_pool3d_out_cpu_template(
"fractional_max_pool3d_out(): non-empty 4D or 5D (batch mode) tensor ",
" expected for input, but got: ", ndims);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ndims == 5) {
numBatch = input_.size(0);
planeDim++;

@@ -242,6 +244,7 @@ static void fractional_max_pool3d_backward_out_single_batch_frame(
plane * outputT * outputH * outputW;
int64_t* indicesForPlane = indices + plane * outputT * outputH * outputW;

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t h, w, t;
for (t = 0; t < outputT; ++t) {
for (h = 0; h < outputH; ++h) {

@@ -309,6 +312,7 @@ void fractional_max_pool3d_backward_out_cpu_template(
int64_t widthDim = 3;

int64_t ndims = input.ndimension();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ndims == 5) {
numBatch = input.size(0);
planeDim = 1;

@@ -2,6 +2,7 @@

namespace at { namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(_compute_linear_combination_stub);

// If `coefficients` is a [m, n] Tensor and

@@ -15,6 +16,7 @@ DEFINE_DISPATCH(_compute_linear_combination_stub);
// This is relevant when scalar_t<T> == complex<T>.
Tensor _compute_linear_combination(const Tensor& input, const Tensor& coefficients) {
auto output_first_dim_size = coefficients.size(0);
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,clang-diagnostic-unused-variable)
auto input_first_dim_size = coefficients.size(1);

auto output_sizes = input.sizes().vec();

@@ -6,7 +6,9 @@
namespace at {
namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(glu_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(glu_backward_stub);

Tensor& glu_out(const Tensor& self, int64_t dim, Tensor &result) {

@@ -501,6 +501,7 @@ Tensor _grid_sampler_2d_cpu_fallback(const Tensor& input, const Tensor& grid,
scalar_t *inp_ptr_NC = inp_ptr_N;
scalar_t *out_ptr_NCHW = out_ptr + n * out_sN + h * out_sH + w * out_sW;
for (int64_t c = 0; c < C; ++c, out_ptr_NCHW += out_sC, inp_ptr_NC += inp_sC) {
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scalar_t coefficients[4];

// Interpolate 4 values in the x directon

@@ -589,6 +590,7 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
scalar_t y = grid_ptr_NHW[grid_sCoor];

// multipliers for gradients on ix, iy
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
scalar_t gix_mult, giy_mult;
scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult);
scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult);

@@ -677,9 +679,13 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
const scalar_t tx = ix - ix_nw;
const scalar_t ty = iy - iy_nw;

// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scalar_t x_coeffs[4];
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scalar_t y_coeffs[4];
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scalar_t x_coeffs_grad[4];
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scalar_t y_coeffs_grad[4];

get_cubic_upsample_coefficients<scalar_t>(x_coeffs, tx);

@@ -752,6 +758,7 @@ Tensor grid_sampler_2d_cpu(const Tensor& input, const Tensor& grid,
kCPU, input, grid, interpolation_mode, padding_mode, align_corners);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(grid_sampler_2d_cpu_kernel);

@@ -798,6 +805,7 @@ grid_sampler_2d_backward_cpu(const Tensor& grad_output, const Tensor& input, con
kCPU, grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(grid_sampler_2d_backward_cpu_kernel);

// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].

@@ -865,6 +873,7 @@ Tensor grid_sampler(const Tensor& input, const Tensor& grid,
static_cast<GridSamplerPadding>(padding_mode) == GridSamplerPadding::Zeros &&
align_corners &&
input.dim() == 4 &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
input.size(1) <= 1024) {
return cudnn_grid_sampler(input, grid);
}

@@ -15,6 +15,7 @@ bool canUse32BitIndexMath(const Tensor& t, int64_t max_elem) {
int64_t linearId = elements - 1;

// NOTE: Assumes all strides are positive, which is true for now
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
for (int i = t.dim() - 1; i >= 0; --i) {
int64_t curDimIndex = linearId % t.size(i);
int64_t curDimOffset = curDimIndex * t.stride(i);

@@ -20,12 +20,14 @@ Tensor do_trapz(const Tensor& y, const Tensor& dx, int64_t dim) {
Tensor left = y.slice(dim, 0, -1);
Tensor right = y.slice(dim, 1);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return ((left + right) * dx).sum(dim) / 2.;
}

// When dx is constant, the above formula simplifies
// to dx * [(\sum_{i=1}^n y_i) - (y_1 + y_n)/2]
Tensor do_trapz(const Tensor& y, double dx, int64_t dim) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx;
}

@@ -43,7 +43,9 @@ Tensor lerp_cpu_scalar(const Tensor& self, const Tensor& end, const Scalar& weig
return result;
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(lerp_kernel_scalar_weight);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(lerp_kernel_tensor_weight);

} // namespace native

@ -92,6 +92,7 @@ static Tensor sumproduct_pair(const Tensor& left_, const Tensor& right_, IntArra
|
||||
// then the permuted output is a view of bmm(left, right)
|
||||
// finally, opermutation reverts the permutation to the original order of dimensions
|
||||
std::vector<int64_t> out_size;
|
||||
// NOLINTNEXTLINE(performance-inefficient-vector-operation)
|
||||
for (auto& d : lro) out_size.push_back(left.size(d));
|
||||
for (auto& d : lo) out_size.push_back(left.size(d));
|
||||
for (auto& d : sum_dims_) { out_size.push_back(1); (void)(d); }; // avoid warining about not using d
|
||||
@ -134,6 +135,7 @@ static Tensor sumproduct_pair(const Tensor& left_, const Tensor& right_, IntArra
|
||||
// finally squeeze summed dimensions if desired
|
||||
if (! keepdim) {
|
||||
auto sizes = result.sizes().vec();
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
for (int i = dim-1; i>=0; i--) {
|
||||
if (sum_dims[i]) {
|
||||
sizes.erase(sizes.begin() + i);
|
||||
|
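For orientation in the `sumproduct_pair` hunks: dimensions are grouped into batch dims present in both operands (`lro`), left-only dims (`lo`), right-only dims (call them `ro` here, by symmetry with the code's `lro`/`lo`), and contracted dims (`sum_dims`); each group is flattened so the contraction becomes a single `bmm`, and `opermutation` restores the original order afterwards. Schematically, with `s` running over the summed group:

```
C_{(lro),(lo),(ro)} = \sum_{s} A_{(lro),(lo),s} \; B_{(lro),s,(ro)}
```

The size-1 placeholders pushed into `out_size` for the summed dims are what the `keepdim` branch in the second hunk later erases.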
@ -26,7 +26,9 @@
namespace at {
namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(addr_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(linalg_vector_norm_stub);

// Helper function for det methods.
@ -90,6 +92,7 @@ Tensor logdet(const Tensor& self) {
Tensor logdet_vals = diag_U.abs_().log_().sum(-1);
if (self.dim() > 2) {
auto indices = toListOfOptionalTensors((det_sign < 0).nonzero_numpy());
// NOLINTNEXTLINE(performance-move-const-arg)
logdet_vals.index_put_(std::move(indices), at::full({}, NAN, self.options()));
} else if (det_sign.item<double>() < 0) {
logdet_vals.fill_(NAN);
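The `logdet` hunk relies on the factorization identity for the log-determinant: with `U` the triangular factor whose diagonal carries the determinant's magnitude,

```
\operatorname{logdet}(A) =
\begin{cases}
  \sum_i \log\lvert U_{ii}\rvert & \det(A) > 0 \\
  \mathrm{NaN} & \det(A) < 0
\end{cases}
```

which is why the code sums `diag_U.abs_().log_()` and then overwrites the entries whose `det_sign` is negative with NaN.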
@ -1206,6 +1209,7 @@ static inline Tensor& bmm_out_or_baddbmm_(Tensor& self_or_result, const Tensor&
|| (strides[1] == 1 && strides[2] >= sizes[1]);
};

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (contraction_size * res_rows * res_cols < 400) {
if (is_bmm_out) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, batch1.scalar_type(), "bmm", [&] {
@ -1570,6 +1574,7 @@ Tensor compute_T2(const Tensor& A) {
auto As = _allocate_buffer(A, 3);
// 3 for {I, A, A^2}
_fill_matrix_powers(As, A, 3);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
As.select(0, 2).div_(2.0);
return As.sum(0);
}
@ -1589,6 +1594,7 @@ Tensor compute_T4(const Tensor& A) {
// computes (I / 2 + A / 6 + A^2 / 24)
at::native::_compute_linear_combination(
As.narrow(0, 0, 3),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
_blob_to_Tensor<scalar_t>({1 / 2.0, 1 / 6.0, 1 / 24.0}, A)
)
);
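The `compute_T*` helpers evaluate truncated Taylor polynomials of the matrix exponential, with `As` holding the matrix powers {I, A, A^2, ...}. For the two hunks above:

```
T_2(A) = I + A + \tfrac{1}{2}A^2, \qquad
T_4(A) = I + A + A^2\left(\tfrac{1}{2}I + \tfrac{1}{6}A + \tfrac{1}{24}A^2\right)
```

Factoring the degree-4 tail through A^2 is what `_compute_linear_combination` with coefficients {1/2, 1/6, 1/24} evaluates; it reaches degree 4 with two matrix products instead of the three a naive Horner evaluation would need.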
@ -1611,6 +1617,7 @@ Tensor compute_T8(const Tensor& A) {
constexpr scalar_t x7 = (89. - sqrt_177) / (5040. * x3);
constexpr scalar_t y2 = (857. - 58. * sqrt_177) / 630.;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto As = _allocate_buffer(A, 5);
// 3 for {I, A, A^2}
_fill_matrix_powers(As, A, 3);
@ -1655,27 +1662,43 @@ Tensor compute_T12(const Tensor& A) {
constexpr int num_prods = 4;
array2d<scalar_t, num_prods, num_prods> b = {{
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
9.0198e-16,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.46932117595418237389,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.20099424927047284052,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.04623946134063071740
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5.31597895759871264183,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.19926790417132231573,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.01179296240992997031,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.01108844528519167989
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.18188869982170434744,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.05502798439925399070,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.09351590770535414968,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.00610700528898058230
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-2.0861320e-13,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.13181061013830184015,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.02027855540589259079,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.00675951846863086359
}
}};
@ -1717,37 +1740,57 @@ Tensor compute_T18(const Tensor& A) {
array2d<scalar_t, num_prods, num_prods> b = {{
{
0.,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.00365581030144618291e-01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-8.02924648241156932449e-03,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-8.92138498045729985177e-04,
0.
},
{
0.,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
3.97849749499645077844e-01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.36783778460411720168e+00,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
4.98289622525382669416e-01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-6.37898194594723280150e-04
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.09676396052962061844e+01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.68015813878906206114e+00,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5.71779846478865511061e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-6.98210122488052056106e-03,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
3.34975017086070470649e-05
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-9.04316832390810593223e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-6.76404519071381882256e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
6.75961301770459654925e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
2.95552570429315521194e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.39180257516060693404e-05
},
{
0.,
0.,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-9.23364619367118555360e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.69364939002081722752e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.40086798182036094347e-05
}
}};
@ -2118,6 +2161,7 @@ static Tensor _norm_min_max(Tensor& self, double ord, int64_t dim, bool keepdim)
static Tensor& _linalg_norm_matrix_out(Tensor& result, const Tensor &self, const optional<Scalar>& opt_ord,
IntArrayRef dim, bool keepdim, optional<ScalarType> opt_dtype) {
Tensor result_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto ord = opt_ord.value_or(2.0).toDouble();
TORCH_CHECK(self.layout() == Layout::Strided,
"matrix norm only supports strided layout, got: ", self.layout());
@ -2389,6 +2433,7 @@ void _linalg_cond_check_ord(c10::variant<Scalar, std::string> ord_variant) {
if (ord_variant.index() == 0) {
Scalar* ord = c10::get_if<Scalar>(&ord_variant);
double abs_ord = std::abs(ord->toDouble());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
TORCH_CHECK(abs_ord == 2.0 || abs_ord == 1.0 || abs_ord == INFINITY,
"linalg_cond got an invalid norm type: ", ord->toDouble());
} else if (ord_variant.index() == 1) {
@ -2419,12 +2464,14 @@ Tensor linalg_cond(const Tensor& self, const optional<Scalar>& opt_ord) {
}

// If ord == None or ord == ±2
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (std::abs(ord.toDouble()) == 2.0) {
auto singular_values = std::get<1>(at::svd(self));
// singular values are sorted in descending order
auto s_max = at::narrow(singular_values, /*dim=*/-1, /*start=*/0, /*length=*/1);
auto s_min = at::narrow(singular_values, /*dim=*/-1, /*start=*/-1, /*length=*/1);
Tensor result;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ord.toDouble() == -2.0) {
result = s_min / s_max;
} else {
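In the `linalg_cond` hunk, the `ord == ±2` branch works because the 2-norm condition number is a ratio of extreme singular values, so after `at::svd` it only needs the first and last entries of the descending singular-value vector:

```
\kappa_{2}(A) = \frac{\sigma_{\max}(A)}{\sigma_{\min}(A)}, \qquad
\kappa_{-2}(A) = \frac{\sigma_{\min}(A)}{\sigma_{\max}(A)}
```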
@ -2594,8 +2641,11 @@ struct KronImpl final {
maxdim = std::max(self.dim(), other.dim());
int64_t pad_self = maxdim - self.dim();
int64_t pad_other = maxdim - other.dim();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a_reshape = c10::SmallVector<int64_t, 10>(2 * maxdim);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
b_reshape = c10::SmallVector<int64_t, 10>(2 * maxdim);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
result_reshape = c10::SmallVector<int64_t, 10>(maxdim);
for (int64_t i = 0; i < maxdim; i++) {
a_reshape[2 * i] = (i >= pad_self ? self.sizes()[i - pad_self] : 1);
@ -2611,6 +2661,7 @@ struct KronImpl final {
Tensor& kron_out(Tensor& result) const {
TORCH_INTERNAL_ASSERT(result.defined(), "Cannot call kron_out with an undefined result tensor as the out argument. Please allocate a Tensor before calling kron_out with it.");

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> mul_shape(2 * maxdim);
for (int64_t i = 0; i < maxdim; i++) {
mul_shape[2 * i] = a_reshape[2 * i];
@ -2630,8 +2681,11 @@ struct KronImpl final {
int64_t maxdim;
Tensor self_view;
Tensor other_view;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> result_reshape;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> a_reshape;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> b_reshape;
};
}
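`KronImpl` generalizes the reshape/broadcast formulation of the Kronecker product to arbitrary rank: both operands are padded to `maxdim` dimensions, their sizes interleaved against ones (`a_reshape`/`b_reshape`), multiplied with broadcasting, and adjacent size pairs collapsed into `result_reshape`. A minimal 2-D sketch of the same trick, assuming the public libtorch C++ API (`torch/torch.h`; `kron2d` is an illustrative name, not an ATen function):

```
#include <torch/torch.h>

// kron(A, B)[i*p + k, j*q + l] == A[i, j] * B[k, l]
torch::Tensor kron2d(const torch::Tensor& a, const torch::Tensor& b) {
  const int64_t m = a.size(0), n = a.size(1);
  const int64_t p = b.size(0), q = b.size(1);
  auto a4 = a.reshape({m, 1, n, 1});  // sizes interleaved with ones
  auto b4 = b.reshape({1, p, 1, q});
  // the broadcast product has shape [m, p, n, q]; collapsing adjacent
  // pairs yields the [m*p, n*q] Kronecker layout
  return (a4 * b4).reshape({m * p, n * q});
}
```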
@ -22,11 +22,17 @@ namespace {

namespace at { namespace native {

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(smooth_l1_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(smooth_l1_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(huber_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(huber_backward_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(mse_stub);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(mse_backward_stub);

Tensor cosine_embedding_loss(const Tensor& input1, const Tensor& input2, const Tensor& target, double margin, int64_t reduction) {
@ -274,6 +280,7 @@ Tensor poisson_nll_loss(const Tensor& input, const Tensor& target, const bool lo
}

if (full) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto stirling_term = target * at::log(target) - target + 0.5 * at::log(2 * c10::pi<double> * target);
loss += stirling_term.masked_fill(target <= 1, 0);
}
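The `full` branch in `poisson_nll_loss` adds back the `log(target!)` term via Stirling's approximation, which is where the `0.5 * at::log(2 * c10::pi<double> * target)` expression comes from; it is masked to zero for `target <= 1`, where the approximation is poor:

```
\log(y!) \approx y \log y - y + \tfrac{1}{2}\log(2\pi y)
```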
@ -442,6 +449,7 @@ Tensor mse_loss_backward(const Tensor& grad_output, const Tensor& input, const T

Tensor& mse_loss_backward_out(const Tensor& grad_output,
const Tensor& input, const Tensor& target, int64_t reduction, Tensor& grad_input) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto norm = reduction == Reduction::Mean ? 2. / input.numel() : 2.;
auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
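The `norm` factor in `mse_loss_backward_out` is the derivative of the squared error under the chosen reduction:

```
\frac{\partial}{\partial x_i}(x_i - t_i)^2 = 2\,(x_i - t_i), \qquad
\frac{\partial}{\partial x_i}\,\frac{1}{N}\sum_j (x_j - t_j)^2 = \frac{2}{N}(x_i - t_i)
```

hence `2.` in general and `2. / input.numel()` for `Reduction::Mean`.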
@ -53,6 +53,7 @@ std::tuple<Tensor, Tensor> ctc_loss_cpu_template(const Tensor& log_probs, const
TORCH_CHECK((int64_t) input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
TORCH_CHECK((int64_t) target_lengths.size() == batch_size, "target_lengths must be of size batch_size");

// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
size_t tg_target_stride;
int64_t max_target_length = 0;
std::vector<int64_t> tg_batch_offsets(batch_size);
@ -178,7 +179,9 @@ Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_
Tensor grad = at::full_like(log_probs, neginf, LEGACY_CONTIGUOUS_MEMORY_FORMAT); // at this point, this is log of empty sum

// The admin bits. We don't do much checking and assume that the forward did.
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t tg_target_stride;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t max_target_length;
std::vector<int64_t> tg_batch_offsets(batch_size);

@ -102,6 +102,7 @@ static void multilabel_margin_loss_forward_out_cpu_template(
Tensor& is_target,
int64_t reduction) {
auto target_arg = TensorArg(target, "target", 2);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t nframe, dim;
const int64_t ndims = input.dim();
if (ndims <= 1) {
@ -155,7 +156,9 @@ static void multilabel_margin_loss_backward_out_frame(
const Tensor& is_target_contiguous,
int64_t nframe,
int64_t dim) {
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
CheckedFrom c = "multilabel_margin_loss_backward_out_frame";
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto is_target_arg = TensorArg(is_target_contiguous, "is_target", 5);

TORCH_CHECK(
@ -167,6 +170,7 @@ static void multilabel_margin_loss_backward_out_frame(
int64_t* target_data = target_contiguous.data_ptr<int64_t>();
scalar_t* is_target_data = is_target_contiguous.data_ptr<scalar_t>();
scalar_t g = static_cast<scalar_t>(
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
reduction == Reduction::Mean ? 1. / (nframe * dim) : 1. / dim);

scalar_t* grad_input_row_data = grad_input.data_ptr<scalar_t>();
@ -220,9 +224,11 @@ static void multilabel_margin_loss_backward_out_cpu_template(
const Tensor& target,
int64_t reduction,
const Tensor& is_target) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t nframe, dim;
CheckedFrom c = "multilabel_margin_loss_backward_cpu_template";
auto target_arg = TensorArg(target, "target", 3);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto is_target_arg = TensorArg(is_target, "is_target", 5);
const int64_t ndims = input.dim();

@ -94,6 +94,7 @@ void multi_margin_loss_out_cpu_template(
const Scalar& margin,
const Tensor& weight,
int64_t reduction) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t nframe, dim;
const auto ndims = input.dim();
auto target_arg = TensorArg(target, "target", 2);
@ -202,6 +203,7 @@ void multi_margin_loss_backward_out_cpu_template(
const Scalar& margin,
const Tensor& weight,
int64_t reduction) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t nframe, dim;
auto target_arg = TensorArg(target, "target", 2);
const auto ndims = input.dim();

@ -97,7 +97,9 @@ static void nll_loss_out_frame(

int64_t num_ignored = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
scalar_t weight_partial_sums[cascade_sum_num_levels] = {0};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
scalar_t loss_partial_sums[cascade_sum_num_levels] = {0};
for (int64_t b = 0; b < batch_size; b++) {
const int64_t cur_target = target_data[b];

@ -152,7 +152,9 @@ static void nll_loss2d_forward_out_frame(
const int64_t numiter = batch_size * map_size;

constexpr int64_t cascade_sum_num_levels = 8;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
scalar_t weight_partial_sums[cascade_sum_num_levels] = {0};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
scalar_t loss_partial_sums[cascade_sum_num_levels] = {0};
const int64_t level_power =
std::max(int64_t(4), utils::CeilLog2(numiter) / cascade_sum_num_levels);
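The `weight_partial_sums` / `loss_partial_sums` arrays in these NLL kernels implement cascade (multi-level pairwise) summation: sums accumulate at level 0 and are periodically carried upward, so floating-point rounding error grows with the number of levels rather than with the element count. A schematic with carry blocks of size 2; the kernels above instead carry every `2^level_power` additions, and this helper is illustrative rather than the ATen code:

```
#include <cstddef>

template <typename scalar_t, int kLevels = 8>
scalar_t cascade_sum(const scalar_t* data, std::size_t n) {
  scalar_t partial[kLevels] = {0};
  for (std::size_t i = 0; i < n; ++i) {
    partial[0] += data[i];
    // after every 2^(lvl+1) inputs, fold level lvl into lvl+1,
    // like carries propagating in a binary counter
    for (int lvl = 0; lvl + 1 < kLevels; ++lvl) {
      if ((i + 1) % (std::size_t{1} << (lvl + 1)) != 0) {
        break;
      }
      partial[lvl + 1] += partial[lvl];
      partial[lvl] = 0;
    }
  }
  // fold in whatever is left at each level
  scalar_t total = 0;
  for (int lvl = kLevels - 1; lvl >= 0; --lvl) {
    total += partial[lvl];
  }
  return total;
}
```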