Concat namespaces in jit code (#138976)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/138976
Approved by: https://github.com/Skylion007
Author: cyy
Date: 2024-10-26 17:41:25 +00:00
Committed by: PyTorch MergeBot
Parent: 4de93d1ead
Commit: 1a73255102

106 changed files with 216 additions and 614 deletions
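The change applies C++17 nested namespace definitions across the JIT sources: each chain of nested single-namespace blocks is concatenated into one definition, and the matching run of closing braces collapses into a single brace. A minimal sketch of the pattern, where the declaration "example" is a placeholder and not code from this commit:

Before (pre-C++17 style):

namespace torch {
namespace jit {

void example(); // placeholder declaration, for illustration only

} // namespace jit
} // namespace torch

After (C++17 nested namespace definition):

namespace torch::jit {

void example(); // placeholder declaration, for illustration only

} // namespace torch::jit

The same rewrite applies at any depth (e.g. namespace torch::jit::fuser::onednn). Where a file closes an inner namespace partway through and keeps code in the enclosing one, two definitions are kept instead (namespace torch::jit { containing namespace fuser::onednn {), since a nested definition cannot be closed partway.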


@ -5,8 +5,7 @@
#include <torch/csrc/jit/backends/backend_interface.h>
#include <torch/custom_class.h>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
inline c10::FunctionSchema getIsAvailableSchema() {
@ -115,5 +114,4 @@ class backend {
}
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <stack>
namespace torch {
namespace jit {
namespace torch::jit {
std::atomic<DebugHandleType> BackendDebugInfoRecorder::unique_debug_handle_{0};
@ -33,5 +32,4 @@ BackendDebugInfoMapType BackendDebugInfoRecorder::stopRecording() {
return handles_to_inlined_callstack_ptrs_;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -7,8 +7,7 @@
#include <atomic>
namespace torch {
namespace jit {
namespace torch::jit {
/*
* BackendDebugHandleManager is responsible for issuing debug handles to
@ -136,5 +135,4 @@ class TORCH_API BackendDebugInfoRecorder {
BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,9 +1,7 @@
#include <c10/macros/Macros.h>
#include <torch/csrc/jit/backends/backend_debug_info.h>
namespace torch {
namespace jit {
namespace backend {
namespace torch::jit::backend {
namespace {
#ifdef BUILD_LITE_INTERPRETER
static auto cls = torch::class_<PyTorchBackendDebugInfoDummy>(
@ -18,6 +16,4 @@ static auto cls = torch::class_<PyTorchBackendDebugInfo>(
#endif
} // namespace
} // namespace backend
} // namespace jit
} // namespace torch
} // namespace torch::jit::backend


@ -5,8 +5,7 @@
#endif
#include <torch/custom_class.h>
namespace torch {
namespace jit {
namespace torch::jit {
constexpr static auto kBackendUtilsNamespace = "backendutils";
constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";
@ -61,5 +60,4 @@ class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
PyTorchBackendDebugInfoDummy() = default;
};
#endif
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -6,8 +6,7 @@
#include <functional>
namespace torch {
namespace jit {
namespace torch::jit {
using DebugHandleType = int64_t;
@ -37,5 +36,4 @@ TORCH_API Module codegen_backend_module(
const c10::Dict<IValue, IValue>& method_compile_spec,
const c10::DictTypePtr& any_dict_ty);
} // namespace detail
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -7,8 +7,7 @@
#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/utils/pybind.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Get all types that are shared in the module hierarchy rooted at \p mod.
std::unordered_set<TypePtr> getSharedModuleTypes(Module& mod) {
@ -189,5 +188,4 @@ void initJitBackendBindings(PyObject* module) {
"Object ", py::str(orig_module), " is not a ScriptModule"));
});
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,9 +3,7 @@
#include <torch/csrc/jit/python/pybind.h>
#include <torch/csrc/utils/pybind.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Initialize Python bindings for JIT to_<backend> functions.
void initJitBackendBindings(PyObject* module);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,10 +1,8 @@
#include <torch/csrc/jit/backends/backend_interface.h>
namespace torch {
namespace jit {
namespace torch::jit {
PyTorchBackendInterface::PyTorchBackendInterface() noexcept = default;
PyTorchBackendInterface::~PyTorchBackendInterface() = default;
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <torch/custom_class.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Interface for a JIT backend.
class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
@ -30,5 +29,4 @@ class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
c10::IValue handle,
c10::impl::GenericList inputs) = 0;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,8 +1,7 @@
#pragma once
#include <torch/csrc/jit/backends/backend_detail.h>
namespace torch {
namespace jit {
namespace torch::jit {
class backend_preprocess_register {
std::string backend_name_;
@ -14,5 +13,4 @@ class backend_preprocess_register {
detail::registerBackendPreprocessFunction(name, preprocess);
}
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <torch/csrc/jit/frontend/sugared_value.h>
#include <torch/custom_class.h>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
// Essentially ClassNamespaceValue from import_source.cpp without the
// SourceImporterImpl reference. This helps resolve the
@ -67,5 +66,4 @@ std::shared_ptr<Resolver> loweredModuleResolver() {
return resolver;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,9 +2,7 @@
#include <torch/csrc/jit/frontend/resolver.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Create a Resolver for use in generating LoweredModules for specific backends.
TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,10 +3,7 @@
#include <string>
namespace torch {
namespace jit {
namespace mobile {
namespace coreml {
namespace torch::jit::mobile::coreml {
struct TensorSpec {
std::string name = "";
@ -26,7 +23,4 @@ static inline c10::ScalarType scalar_type(const std::string& type_string) {
return c10::ScalarType::Undefined;
}
} // namespace coreml
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::coreml


@ -8,10 +8,7 @@
#include <memory>
#include <vector>
namespace torch {
namespace jit {
namespace xnnpack {
namespace delegate {
namespace torch::jit::xnnpack::delegate {
class XNNExecutor {
private:
@ -68,7 +65,4 @@ class XNNExecutor {
friend class XNNCompiler;
};
} // namespace delegate
} // namespace xnnpack
} // namespace jit
} // namespace torch
} // namespace torch::jit::xnnpack::delegate


@ -9,10 +9,7 @@
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/runtime/register_ops_utils.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace torch::jit::fuser::cuda {
static std::atomic<bool> cuda_fusion_guard_mode{true};
@ -131,7 +128,4 @@ bool skipNode(const std::string& symbol_str, bool flip) {
getFuserInterface()->fn_skip_n(symbol_str, flip);
}
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::cuda


@ -13,10 +13,7 @@
* Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp
*/
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace torch::jit::fuser::cuda {
TORCH_API std::atomic<bool>& getCudaFusionGuardMode();
@ -52,7 +49,4 @@ TORCH_API bool isEnabled();
TORCH_API bool setEnabled(bool is_enabled);
TORCH_API bool canBeEnabled();
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::cuda


@ -13,10 +13,7 @@ namespace at {
struct DynamicLibrary;
}
namespace torch {
namespace jit {
namespace fuser {
namespace cpu {
namespace torch::jit::fuser::cpu {
// Represents a compiled CPU kernel and the metadata necessary to run it
struct TORCH_API FusedKernelCPU : public FusedKernel {
@ -43,7 +40,4 @@ struct TORCH_API FusedKernelCPU : public FusedKernel {
void (*kernel)(uint32_t, void**) = nullptr;
};
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::cpu


@ -2,10 +2,7 @@
#include <ATen/code_template.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cpu {
namespace torch::jit::fuser::cpu {
/*with type_as not checking type of its input, a fusion group can have non-fp32
tensor as input. Correct code for this case is generated, however, nvrtc does
@ -101,7 +98,4 @@ JIT_API void ${kernelName}(IndexType totalElements, void ** args) {
}
)");
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::cpu


@ -22,10 +22,7 @@
#include <string>
#include <vector>
namespace torch {
namespace jit {
namespace fuser {
namespace cpu {
namespace torch::jit::fuser::cpu {
#ifdef _MSC_VER
int wmkstemps(wchar_t* tmpl, int suffix_len) {
@ -135,7 +132,4 @@ struct TempFile {
std::string name_;
};
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::cpu


@ -4,10 +4,7 @@
#include <c10/core/CPUAllocator.h>
#include <torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
// Non-default dnnl::graph::allocator needs an allocator.
// We would let it use c10::GetCPUAllocator's allocator,
@ -152,9 +149,6 @@ at::ScalarType LlgaTensorDesc::aten_scalar_type() const {
}
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn
#endif // AT_MKLDNN_ENABLED()


@ -6,10 +6,7 @@
#include <oneapi/dnnl/dnnl_graph.hpp>
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
// Engine represents a device and its context. From the device kind, the engine
// knows how to generate code for the target device and what kind of device
@ -270,7 +267,4 @@ at::Tensor empty_llga(
dnnl::graph::tensor llga_from_aten_tensor(const at::Tensor& tensor);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -5,10 +5,7 @@
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
static bool shouldDecomposeSilu(Node* node) {
if (node->kind() != aten::silu) {
@ -59,7 +56,4 @@ void DecomposeSiluForLLGA(std::shared_ptr<Graph>& graph) {
EliminateDeadCode(graph);
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,14 +2,8 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
void DecomposeSiluForLLGA(std::shared_ptr<Graph>& graph);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,10 +2,7 @@
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/runtime/symbolic_shape_registry_util.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
class SizeCheckMover {
private:
@ -82,7 +79,4 @@ void DeferSizeCheck(std::shared_ptr<Graph>& graph) {
SizeCheckMover(graph->block(), graph).run();
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,14 +2,8 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
void DeferSizeCheck(std::shared_ptr<Graph>& graph);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -5,10 +5,7 @@
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
void CreateLlgaSubgraphs(std::shared_ptr<Graph>& graph) {
AliasDb db(graph);
@ -25,7 +22,4 @@ void CreateLlgaSubgraphs(std::shared_ptr<Graph>& graph) {
EliminateDeadCode(graph);
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -3,10 +3,7 @@
#include <torch/csrc/jit/codegen/onednn/graph_helper.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
struct WorkBlock : public std::pair<Node*, Node*> {
using pair::pair;
@ -47,7 +44,4 @@ class GraphRewriter {
// torch/csrc/jit/passes/create_autodiff_subgraphs.cpp
void CreateLlgaSubgraphs(std::shared_ptr<Graph>& graph);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -5,10 +5,7 @@
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
using opkind = dnnl::graph::op::kind;
@ -615,7 +612,4 @@ bool LlgaNodeWrapper::useOpaqueLayout(size_t offset) const {
return n->is(attr::output_layouts)[offset] == OPAQUE_LAYOUT;
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -5,10 +5,7 @@
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
#define STRIDED_LAYOUT 0
#define OPAQUE_LAYOUT 1
@ -98,7 +95,4 @@ class LlgaNodeWrapper {
Node* n;
};
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -5,10 +5,7 @@
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
void GraphRewriter::cleanupSubgraphs() {
auto curNode = *block_->nodes().rbegin();
@ -138,7 +135,4 @@ std::optional<Node*> GraphRewriter::tryMerge(Node* consumer, Node* producer) {
return consumer;
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -5,10 +5,7 @@
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
#include <torch/csrc/jit/runtime/graph_executor.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
//! [ Note -- prepareFusionGroupAndGuardOutputs implementation ]
//! shamelessly copying code from NNC (tensorexpr_fuser) with very little
@ -39,7 +36,4 @@ void prepareFusionGroupAndGuardOutputs(Block* block) {
}
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,14 +2,8 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
void prepareFusionGroupAndGuardOutputs(Block* block);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -16,10 +16,8 @@
#include <torch/csrc/jit/runtime/graph_executor.h>
#include <torch/csrc/jit/runtime/operator_options.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit {
namespace fuser::onednn {
void fuseGraph(std::shared_ptr<Graph>& g) {
// Follow the process of the tensorexpr_fuser in profiling mode:
@ -95,8 +93,7 @@ void fuseGraph(std::shared_ptr<Graph>& g) {
}
}
} // namespace onednn
} // namespace fuser
} // namespace fuser::onednn
static Operation createLlgaKernel(const Node* node) {
auto kernel = std::make_shared<fuser::onednn::LlgaKernel>(node);
@ -178,5 +175,4 @@ RegisterOperators oneDNNGuardOp({
createLlgaGuardKernel,
AliasAnalysisKind::FROM_SCHEMA),
});
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,10 +3,8 @@
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/pass_manager.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit {
namespace fuser::onednn {
static std::atomic<bool> onednn_enabled{false};
@ -16,8 +14,7 @@ static std::atomic<bool>& getLlgaEnabled() {
C10_EXPORT void fuseGraph(std::shared_ptr<Graph>& g);
} // namespace onednn
} // namespace fuser
} // namespace fuser::onednn
struct C10_EXPORT RegisterLlgaFuseGraph
: public PassManager<RegisterLlgaFuseGraph> {
@ -58,5 +55,4 @@ struct C10_EXPORT RegisterLlgaFuseGraph
}
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,10 +4,7 @@
#include <ATen/core/functional.h>
#include <torch/csrc/jit/jit_log.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
using namespace dnnl::graph;
using data_type = dnnl::graph::logical_tensor::data_type;
@ -293,7 +290,4 @@ void LlgaKernel::run(Stack& stack) {
#endif
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -10,10 +10,7 @@
#include <c10/util/CallOnce.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
using ArgSpec = LlgaTensorDesc;
using ArgSpecs = std::vector<ArgSpec>;
@ -89,7 +86,4 @@ class LlgaKernel {
bool is_initialized_ = false;
};
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,10 +2,7 @@
#include <torch/csrc/jit/codegen/onednn/layout_propagation.h>
#include <torch/csrc/jit/jit_log.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
static void LayoutPropagation(Node* n) {
if (!LlgaGraphHelper::isLlgaSubgraph(n))
@ -47,7 +44,4 @@ void PropagateLayout(const std::shared_ptr<Graph>& graph) {
LayoutPropagation(graph->block());
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,14 +2,8 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
void PropagateLayout(const std::shared_ptr<Graph>& graph);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -4,10 +4,7 @@
#include <torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
class Operator {
public:
@ -146,7 +143,4 @@ class Operator {
dnnl::graph::op::kind k;
};
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -3,10 +3,7 @@
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/shape_analysis.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
static bool compareConstValue(Value* v, double d) {
auto ival = toIValue(v);
@ -179,7 +176,4 @@ void PrepareBinaryForLLGA(const std::shared_ptr<Graph>& graph) {
ConvertScalarToTensor(graph->block());
}
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -2,10 +2,7 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
// Prepare binary ops for LLGA
//
@ -20,7 +17,4 @@ namespace onednn {
//
void PrepareBinaryForLLGA(const std::shared_ptr<Graph>& graph);
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -1,9 +1,6 @@
#include <torch/csrc/jit/runtime/profiling_record.h>
namespace torch {
namespace jit {
namespace fuser {
namespace onednn {
namespace torch::jit::fuser::onednn {
static bool canFuseNode(const Node* node) {
switch (node->kind()) {
@ -48,7 +45,4 @@ class RegisterInterface {
static RegisterInterface register_interface_;
} // namespace
} // namespace onednn
} // namespace fuser
} // namespace jit
} // namespace torch
} // namespace torch::jit::fuser::onednn


@ -25,10 +25,7 @@
using namespace torch::jit;
using namespace torch::jit::tensorexpr;
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
// TODO(mvz): temporarily disable NNC backend in mobile builds.
/*
@ -446,7 +443,4 @@ static c10::IValue preprocess(
// static auto reg = torch::jit::backend_preprocess_register("nnc", preprocess);
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -4,10 +4,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/mobile/nnc/context.h>
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
// Performs Ahead Of Time compilation of a given method in a model
// returning the compiled function and LLVM assembly code
@ -18,7 +15,4 @@ TORCH_API std::pair<std::unique_ptr<Function>, const std::string> aotCompile(
const std::vector<at::ScalarType>& types,
const std::string& kernel_func_name = "func");
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -3,10 +3,7 @@
#include <torch/csrc/jit/backends/backend.h>
#include <torch/csrc/jit/mobile/nnc/context.h>
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
class NNCBackend : public PyTorchBackendInterface {
public:
@ -55,7 +52,4 @@ namespace {
// static const auto cls = torch::jit::backend<NNCBackend>("nnc");
} // namespace
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -7,10 +7,7 @@
#include <torch/csrc/jit/mobile/nnc/registry.h>
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
constexpr int64_t kProducedNNCFileFormatVersion = 0x1L;
@ -342,7 +339,4 @@ Function* CompilationUnit::find_function(const c10::QualifiedName& name) const {
return it->second.get();
}
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -8,10 +8,7 @@
#include <ATen/core/ivalue.h>
#include <c10/core/ScalarType.h>
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
// Specify the requirements on an input tensor.
// TODO: support input tensor with dynamic shape (PR #54982)
@ -223,7 +220,4 @@ class TORCH_API CompilationUnit {
std::unordered_map<c10::QualifiedName, std::unique_ptr<Function>> functions_;
};
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -1,13 +1,7 @@
#include <torch/csrc/jit/mobile/nnc/registry.h>
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
C10_DEFINE_REGISTRY(NNCKernelRegistry, NNCKernel);
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -3,10 +3,7 @@
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
namespace torch {
namespace jit {
namespace mobile {
namespace nnc {
namespace torch::jit::mobile::nnc {
using nnc_kernel_function_type = int(void**);
@ -40,7 +37,4 @@ inline std::unique_ptr<NNCKernel> get_nnc_kernel(const std::string& id) {
} // namespace registry
} // namespace nnc
} // namespace mobile
} // namespace jit
} // namespace torch
} // namespace torch::jit::mobile::nnc


@ -5,8 +5,7 @@
#include <torch/csrc/jit/passes/quantization/helper.h>
#include <torch/csrc/jit/runtime/graph_iterator.h>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
@ -70,5 +69,4 @@ Module DBRQuantRemoveRedundantAliases(Module& module) {
return module;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <torch/csrc/jit/api/module.h>
namespace torch {
namespace jit {
namespace torch::jit {
// This function replaces instances of
//
@ -17,5 +16,4 @@ namespace jit {
// on the module forward, if it's safe to do so.
TORCH_API Module DBRQuantRemoveRedundantAliases(Module& module);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
#include <torch/csrc/jit/passes/onnx/helper.h>
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
namespace torch {
namespace jit {
namespace torch::jit {
void convertSubgraphToSubBlock(Block* block) {
for (auto it = block->nodes().begin(), end = block->nodes().end();
@ -54,5 +53,4 @@ void ONNXAutogradFunctionProcess(std::shared_ptr<Graph>& graph) {
convertSubgraphToSubBlock(graph->block());
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,10 +2,8 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace torch::jit {
TORCH_API void ONNXAutogradFunctionProcess(std::shared_ptr<Graph>& graph);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,7 +1,6 @@
#include <torch/csrc/jit/passes/onnx/pattern_conversion/common.h>
namespace torch {
namespace jit {
namespace torch::jit {
bool IndexingPatternFinder::IsSameSource(const Node* n, const Node* m) {
const auto source_n = n->sourceRange().source();
@ -41,5 +40,4 @@ std::vector<Node*> IndexingPatternFinder::FetchSliceAndSelect(
return slice_and_select_node;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
// Functions used by both encapsulation and conversion.
namespace torch {
namespace jit {
namespace torch::jit {
struct IndexingPatternFinder {
public:
@ -15,5 +14,4 @@ struct IndexingPatternFinder {
static bool IsSameSource(const Node* n, const Node* m);
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -12,8 +12,7 @@
// EDITING THIS FILE? READ THIS FIRST!
// see Note [Edit Pattern Conversion] in pattern_conversion.h
namespace torch {
namespace jit {
namespace torch::jit {
// Converting inplace index_put to ONNX
namespace {
@ -392,5 +391,4 @@ std::vector<Value*> ConvertPatternFromSubblock(
return res;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,8 +3,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/utils/pybind.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Introduction
//
@ -42,5 +41,4 @@ TORCH_API std::vector<Value*> ConvertPatternFromSubblock(
py::dict& env,
py::set& values_in_env);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -7,8 +7,7 @@
// EDITING THIS FILE? READ THIS FIRST!
// see Note [Edit Pattern Encapsulation] in pattern_encapsulation.h
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
@ -87,5 +86,4 @@ std::optional<Node*> EncapsulatePatternIntoSubblock(Node* n) {
return std::nullopt;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Introduction
//
@ -30,5 +29,4 @@ namespace jit {
// pattern is stored as attr::name.
TORCH_API std::optional<Node*> EncapsulatePatternIntoSubblock(Node* n);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -5,8 +5,7 @@
#include <stack>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
class ModuleUseDeduper {
public:
@ -125,5 +124,4 @@ void DedupModuleUses(Module& module) {
d.dedup();
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <torch/csrc/jit/api/module.h>
namespace torch {
namespace jit {
namespace torch::jit {
/** Recursively deduplicate multiple uses of the same module by
* creating an instance clone for each use of the module, which means
@ -24,5 +23,4 @@ namespace jit {
*/
TORCH_API void DedupModuleUses(Module& module);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -16,8 +16,7 @@
#include <utility>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
@ -275,5 +274,4 @@ Module FinalizeOnDevicePTQ(
return module;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/quantization/quantization_type.h>
namespace torch {
namespace jit {
namespace torch::jit {
/** \brief Backend specific pass to fuse dequantize - op - quantize calls
* as quantized_op calls.
@ -59,5 +58,4 @@ TORCH_API Module FinalizeOnDevicePTQ(
Module& module,
QuantType quant_type,
const std::string& method_name);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,8 +1,7 @@
#include <torch/csrc/jit/passes/quantization/fusion_passes.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
void fuseQuantizeAddReluImpl(std::shared_ptr<Graph>& graph) {
@ -59,5 +58,4 @@ void FuseQuantizedAddRelu(std::shared_ptr<Graph>& graph) {
fuseQuantizeAddReluImpl(graph);
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,6 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace torch::jit {
TORCH_API void FuseQuantizedAddRelu(std::shared_ptr<Graph>& graph);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -5,8 +5,7 @@
#include <utility>
namespace torch {
namespace jit {
namespace torch::jit {
using graph_rewrite_helper::getFuncName;
@ -795,5 +794,4 @@ bool is_batchnorm3d_module(
"__torch__.torch.nn.modules.batchnorm.BatchNorm3d");
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -8,8 +8,7 @@
#include <functional>
#include <regex>
namespace torch {
namespace jit {
namespace torch::jit {
using graph_rewrite_helper::getFuncName;
@ -212,5 +211,4 @@ bool is_batchnorm3d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -17,8 +17,7 @@
#include <string>
#include <utility>
namespace torch {
namespace jit {
namespace torch::jit {
using ModuleQConfigMap = std::unordered_map<ModulePtr, std::optional<QConfig>>;
@ -1720,5 +1719,4 @@ Module InsertObserversForOnDevicePTQ(
cloned_module, observer_method_name, /* is_entry_point */ true);
return cloned_module;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -14,8 +14,7 @@ struct hash<torch::jit::Module> {
} // namespace std
namespace torch {
namespace jit {
namespace torch::jit {
using QConfig = std::tuple<Module, Module>;
using QConfigDict = std::unordered_map<std::string, std::optional<QConfig>>;
@ -64,5 +63,4 @@ TORCH_API Module InsertObserversForOnDevicePTQ(
bool inplace,
QuantType quant_type = QuantType::STATIC);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -15,8 +15,7 @@
#include <stack>
#include <utility>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
using graph_rewrite_helper::PatternInfo;
@ -1841,5 +1840,4 @@ Module InsertQuantDeQuantOnDevicePTQ(
h.propagateQuantizationOps(module);
return module;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/quantization/quantization_type.h>
namespace torch {
namespace jit {
namespace torch::jit {
/** Replicate quantize node for prim::If blocks, so that we can match
* quantization patterns in prim::If blocks
@ -42,5 +41,4 @@ TORCH_API Module InsertQuantDeQuantOnDevicePTQ(
bool debug,
QuantType quant_type = QuantType::STATIC);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -10,8 +10,7 @@
#include <unordered_map>
#include <utility>
namespace torch {
namespace jit {
namespace torch::jit {
struct QuantFusionInfo {
std::string quantized_op_name;
@ -1260,5 +1259,4 @@ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %di
std::move(conv_transpose2d_with_quant_prepack)}};
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,7 +1,6 @@
#include <torch/csrc/jit/passes/quantization/quantization_type.h>
namespace torch {
namespace jit {
namespace torch::jit {
std::ostream& operator<<(std::ostream& os, QuantType t) {
switch (t) {
@ -17,5 +16,4 @@ std::ostream& operator<<(std::ostream& os, QuantType t) {
return os;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <cstdint>
#include <ostream>
namespace torch {
namespace jit {
namespace torch::jit {
// Quantization type (dynamic quantization, static quantization).
// Should match the Python enum in quantize_jit.py
@ -11,5 +10,4 @@ enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC };
std::ostream& operator<<(std::ostream& os, QuantType t);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -7,8 +7,7 @@
#include <torch/csrc/jit/passes/quantization/helper.h>
#include <torch/csrc/jit/passes/quantization/register_packed_params.h>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
bool isPrepackNode(Node* n) {
@ -144,5 +143,4 @@ std::unordered_set<std::string> RegisterPrePackParams(
return packed_param_names;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <memory>
namespace torch {
namespace jit {
namespace torch::jit {
using PrePackParamFilterFn = std::function<bool(Node*)>;
@ -16,5 +15,4 @@ TORCH_API std::unordered_set<std::string> RegisterPrePackParams(
const std::string& attr_prefix);
TORCH_API std::string joinPaths(const std::vector<std::string>& paths);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -6,8 +6,7 @@
#include <c10/util/irange.h>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
IValue deepCopy(const IValue& self) {
@ -305,5 +304,4 @@ void checkAliasAnnotation(
checkWrites(inputsToCheck, inputsDeepCopy);
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -6,8 +6,7 @@
#include <string>
#include <vector>
namespace torch {
namespace jit {
namespace torch::jit {
// Verify that alias annotations are correct. See impl for definition of
// "correct".
@ -18,5 +17,4 @@ TORCH_API void checkAliasAnnotation(
const std::shared_ptr<Graph>& graph,
std::vector<IValue> pythonInputs,
const std::string& unqualifiedOpName);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
#include <algorithm>
#include <queue>
namespace torch {
namespace jit {
namespace torch::jit {
namespace {
void makePointerToImpl(Element* from, Element* to) {
@ -232,5 +231,4 @@ void MemoryDAG::setWildcards(
Element* MemoryDAG::unsafeMakeFreshValue(const Value* v) {
return makeFreshValueImpl(v, indexToElementMap_);
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -16,8 +16,7 @@
// Uses a compressed index representation for faster comparisons
typedef c10::SparseBitVector<256> MemoryLocations;
namespace torch {
namespace jit {
namespace torch::jit {
struct Value;
@ -172,5 +171,4 @@ class TORCH_API MemoryDAGBuilder {
// the map to construct the `MemoryDAG`
std::vector<std::unique_ptr<Element>> indexToElementMap_;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
// Location for Commonly Used Shape registries
namespace torch {
namespace jit {
namespace torch::jit {
// Requirements:
// dims : preserved from the first argument
@ -72,5 +71,4 @@ std::shared_ptr<OperatorSet> ops_one_tensor_in_shape_transform() {
});
return ops;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -4,8 +4,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <memory>
namespace torch {
namespace jit {
namespace torch::jit {
// Moved from shape_analysis.cpp
// Requirements:
@ -27,5 +26,4 @@ std::shared_ptr<OperatorSet> nn_ops_first_input_preserving();
// tensor inputs : 1
// tensor outputs : 1
std::shared_ptr<OperatorSet> ops_one_tensor_in_shape_transform();
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,7 +1,6 @@
#include <torch/csrc/jit/passes/utils/optimization_utils.h>
namespace torch {
namespace jit {
namespace torch::jit {
bool nonConstantParameters(Node* n) {
// Checks if the parameters, not including the
@ -14,5 +13,4 @@ bool nonConstantParameters(Node* n) {
return false;
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,12 +3,10 @@
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Checks if the parameters, not including the
// first param are all constants.
bool nonConstantParameters(Node* n);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -9,9 +9,7 @@
#include <utility>
namespace torch {
namespace jit {
namespace SubgraphUtils {
namespace torch::jit::SubgraphUtils {
namespace {
bool hasSubgraph(Node* n) {
@ -633,6 +631,4 @@ std::string generateNameForGraph(
return truncateStrWithHash(graph_name.str(), maxlen);
}
} // namespace SubgraphUtils
} // namespace jit
} // namespace torch
} // namespace torch::jit::SubgraphUtils


@ -4,14 +4,11 @@
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
// Utilities for dealing with nodes that contain subgraphs.
//
// They handle the complexity of editing inputs/outputs as you merge nodes in
// and out of subgraphs.
namespace SubgraphUtils {
namespace torch::jit::SubgraphUtils {
// Create a new subgraph node that contains only `n`. The new subgraph will have
// `subgraphKind` as its type.
@ -70,6 +67,4 @@ TORCH_API std::string generateNameForGraph(
size_t maxlen = 40,
const std::string& prefix = "fused");
} // namespace SubgraphUtils
} // namespace jit
} // namespace torch
} // namespace torch::jit::SubgraphUtils


@ -6,9 +6,7 @@
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/mem_dependency_checker.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
class Expr;
class Buf;
@ -74,6 +72,4 @@ TORCH_API bool isOverlapping(
const StorePtr& S,
const LoadPtr& L);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -6,10 +6,7 @@
#include <utility>
#include <vector>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace analysis {
namespace torch::jit::tensorexpr::analysis {
// A simple class containing the start and end of a range in a single dimension.
struct TORCH_API Bound {
@ -121,7 +118,4 @@ std::vector<IndexBounds> TORCH_API subtractIndicesBounds(
std::vector<IndexBounds> TORCH_API
subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B);
} // namespace analysis
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr::analysis


@ -1,8 +1,6 @@
#pragma once
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
constexpr auto cpp_intrinsics_definition = R"(
namespace std {
@ -31,6 +29,4 @@ To bitcast(const From& v) {
} // namespace std
)";
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -1,8 +1,6 @@
#pragma once
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
constexpr auto philox_random_string = R"(
@ -99,6 +97,4 @@ __device__ __inline__ float Uint32ToFloat(unsigned int x) {
)";
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -2,9 +2,7 @@
#include <c10/core/ScalarType.h>
#include <memory>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
template <typename Node>
using NodePtr = std::shared_ptr<Node>;
@ -124,6 +122,4 @@ using SyncThreadsPtr = NodePtr<SyncThreads>;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
#undef IMM_DECLARE
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -11,9 +11,7 @@
#include <ATen/core/ivalue.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
enum CompareSelectOperation {
kEQ = 0,
@ -918,6 +916,4 @@ TORCH_API ExprPtr flatten_index(
const std::vector<ExprPtr>& indices,
const std::vector<ExprPtr>& strides);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -5,9 +5,7 @@
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
class TORCH_API IRCloner : public IRMutator {
public:
@ -61,6 +59,4 @@ class TORCH_API IRCloner : public IRMutator {
StmtPtr mutate(const CondPtr& v) override;
};
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -3,9 +3,7 @@
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
class Expr;
class ExprHandle;
@ -53,6 +51,4 @@ TORCH_API void verify(const StmtPtr&);
TORCH_API void verify(const ExprPtr&);
TORCH_API void verify(const ExprHandle&);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -3,9 +3,7 @@
#include <torch/csrc/jit/tensorexpr/operators/misc.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
// An API to compute 2D depthwise convolutions with bias.
TORCH_API Tensor conv2d_depthwise(
@ -100,6 +98,4 @@ Tensor computeMkldnnPrepackedConvRun(
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -2,9 +2,7 @@
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
Tensor computeMatmul(
const std::vector<ArgValue>& inputs,
@ -19,6 +17,4 @@ Tensor computeAddMM(
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -2,9 +2,7 @@
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
Tensor computeBatchNorm(
const std::vector<ArgValue>& inputs,
@ -13,6 +11,4 @@ Tensor computeBatchNorm(
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -2,9 +2,7 @@
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
TORCH_API Tensor computeSign(
const std::vector<ArgValue>& inputs,
@ -81,6 +79,4 @@ Tensor computeScalar(
const std::function<ExprHandle(const ExprHandle&, const ExprHandle&)>&
innerExpr);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr


@ -2,9 +2,7 @@
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch {
namespace jit {
namespace tensorexpr {
namespace torch::jit::tensorexpr {
TORCH_API ExprHandle quantizePerTensorQParamFromArg(ArgValue arg);
@ -155,6 +153,4 @@ TORCH_API Tensor computeQuantizedSigmoidExternalCall(
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device);
} // namespace tensorexpr
} // namespace jit
} // namespace torch
} // namespace torch::jit::tensorexpr

Some files were not shown because too many files have changed in this diff.