[nativert] clean up some migration side-effects (#156919)

Summary: remove explicit torch::nativert:: namespace qualifiers and // @manual include annotations left over from the migration

Test Plan:
ci

Rollback Plan:

Differential Revision: D77328855

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156919
Approved by: https://github.com/zhxchen17
Authored by Dylan Maloy on 2025-06-26 20:28:28 +00:00
Committed by PyTorch MergeBot
parent b6e625e34f
commit 81759afed4
15 changed files with 44 additions and 67 deletions
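
For reference, the cleanup pattern: every touched file already sits inside a namespace torch::nativert { ... } block, so members of that namespace resolve without the full torch::nativert:: qualifier, and the trailing // @manual annotations (internal build-tooling hints on #include lines) are dropped as well. Below is a minimal, self-contained sketch of the before/after shape, modeled on the OpKernelKind default argument in OpKernel.h; the OpKernelBefore/OpKernelAfter class names and the trimmed enum are illustrative, not the actual declarations.

namespace torch::nativert {

class Node; // stands in for the real graph Node type
enum class OpKernelKind { kInterpreterFallbackKernel, kPrimKernel };

// Before: redundantly qualified even though the declaration already lives inside torch::nativert.
class OpKernelBefore {
 public:
  explicit OpKernelBefore(
      const Node* node,
      torch::nativert::OpKernelKind kind =
          torch::nativert::OpKernelKind::kInterpreterFallbackKernel);
};

// After: unqualified name lookup finds OpKernelKind in the enclosing namespace.
class OpKernelAfter {
 public:
  explicit OpKernelAfter(
      const Node* node,
      OpKernelKind kind = OpKernelKind::kInterpreterFallbackKernel);
};

} // namespace torch::nativert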

View File

@@ -16,8 +16,6 @@
namespace torch::nativert::detail {
-using torch::nativert::Value;
class ITreeSpec;
using ITreeFlattenFn =

View File

@@ -53,7 +53,7 @@ std::string extractToTemporaryFolder(
<< " from archive path: " << path << " size: " << dataSize;
File extracted(extractedFilename, O_CREAT | O_WRONLY, 0640);
-const auto bytesWritten = torch::nativert::writeFull(
+const auto bytesWritten = writeFull(
extracted.fd(), const_cast<void*>(dataPointer.get()), dataSize);
TORCH_CHECK(
bytesWritten != -1,

View File

@@ -99,8 +99,7 @@ class OpKernel {
explicit OpKernel(
const Node* node,
std::optional<c10::Device> device = std::nullopt,
-torch::nativert::OpKernelKind kind =
-torch::nativert::OpKernelKind::kInterpreterFallbackKernel)
+OpKernelKind kind = OpKernelKind::kInterpreterFallbackKernel)
: node_(node), device_(device), kind_(kind) {
VLOG(1) << "Initializing kernel for node: " << *node_;
}
@@ -110,17 +109,17 @@ class OpKernel {
}
void compute(ExecutionFrame& executionFrame) const;
-torch::nativert::OpKernelKind kind() const {
+OpKernelKind kind() const {
return kind_;
}
bool hasPrimKernel() const {
-return kind() == torch::nativert::OpKernelKind::kPrimKernel;
+return kind() == OpKernelKind::kPrimKernel;
}
bool hasStaticDispatch() const {
-return kind() == torch::nativert::OpKernelKind::kStaticDispatchKernel ||
-kind() == torch::nativert::OpKernelKind::kNativeStaticDispatchKernel;
+return kind() == OpKernelKind::kStaticDispatchKernel ||
+kind() == OpKernelKind::kNativeStaticDispatchKernel;
}
size_t numInputs() const {
@@ -154,7 +153,7 @@ class OpKernel {
std::optional<c10::Device> device_;
const static bool blockingEnabled_;
// this should be set in the ctor!
-const torch::nativert::OpKernelKind kind_;
+const OpKernelKind kind_;
};
} // namespace torch::nativert

View File

@@ -175,7 +175,7 @@ void WorkUnit::run(ThreadPoolExecutor* executor, SessionState* session) {
ParallelGraphExecutor::ParallelGraphExecutor(
const Graph& graph,
std::vector<std::unique_ptr<OpKernel>> nodeKernels,
-const torch::nativert::ExecutorConfig& executorConfig)
+const ExecutorConfig& executorConfig)
: GraphExecutorBase(graph, std::move(nodeKernels), executorConfig),
workUnits_(
graph.nodes().size() - 2 /* no need for prim.Input or Prim.Output */),

View File

@@ -1,8 +1,8 @@
#pragma once
#include <c10/util/Semaphore.h>
-#include <torch/nativert/executor/GraphExecutorBase.h> // @manual
-#include <torch/nativert/executor/SessionState.h> // @manual
+#include <torch/nativert/executor/GraphExecutorBase.h>
+#include <torch/nativert/executor/SessionState.h>
#include <thread>
namespace moodycamel {
@@ -71,7 +71,7 @@ class ParallelGraphExecutor : public GraphExecutorBase {
ParallelGraphExecutor(
const Graph& graph,
std::vector<std::unique_ptr<OpKernel>> nodeKernels,
-const torch::nativert::ExecutorConfig& executorConfig);
+const ExecutorConfig& executorConfig);
std::vector<c10::IValue> execute(
ExecutionFrame& frame,

View File

@@ -9,9 +9,6 @@
namespace torch::nativert {
-using torch::nativert::ExecutionFrame;
-using torch::nativert::Node;
template <typename T, typename __atomic_base = std::atomic<T>>
struct copyable_atomic : public __atomic_base {
public:

View File

@@ -17,8 +17,7 @@ class FunctionSchema {
explicit FunctionSchema(
const c10::FunctionSchema& schema,
AliasingSpec&& aliasing_spec = {},
-torch::nativert::OpKernelKind kernel_kind =
-torch::nativert::OpKernelKind::kInterpreterFallbackKernel)
+OpKernelKind kernel_kind = OpKernelKind::kInterpreterFallbackKernel)
: aliasing_spec_(std::move(aliasing_spec)),
kernel_kind_(kernel_kind),
c10_fn_schema_(schema) {}
@@ -33,13 +32,13 @@
bool alias(size_t input_idx, size_t output_idx) const;
-C10_ALWAYS_INLINE torch::nativert::OpKernelKind kernel_kind() const {
+C10_ALWAYS_INLINE OpKernelKind kernel_kind() const {
return kernel_kind_;
}
private:
AliasingSpec aliasing_spec_;
-torch::nativert::OpKernelKind kernel_kind_;
+OpKernelKind kernel_kind_;
c10::FunctionSchema c10_fn_schema_;
};

View File

@@ -8,8 +8,8 @@
#include <c10/util/Enumerate.h>
#include <c10/util/FbcodeMaps.h>
#include <c10/util/StringUtil.h>
-#include <torch/nativert/executor/Placement.h> // @manual
-#include <torch/nativert/graph/TensorMeta.h> // @manual
+#include <torch/nativert/executor/Placement.h>
+#include <torch/nativert/graph/TensorMeta.h>
namespace torch::nativert {
@@ -281,7 +281,7 @@ void Node::applyDevicePlacement(const Placement& placement) {
auto device = std::get<c10::Device>(attribute.value);
auto targetDevice =
placement.getMappedDevice(std::get<c10::Device>(attribute.value));
-if (!torch::nativert::isSameDevice(targetDevice, device)) {
+if (!isSameDevice(targetDevice, device)) {
LOG(INFO) << "Overriding " << device.str() << " to "
<< targetDevice.str() << " for node " << *this;
attribute.value = targetDevice;
@@ -1282,7 +1282,7 @@ std::unique_ptr<Graph> Parser::parse() {
}
// For graph textual format, it should be safe to assume all
// inputs/outputs are from users.
-graph_->setSignature(torch::nativert::GraphSignature{signature_});
+graph_->setSignature(GraphSignature{signature_});
graph_->finalize();
graph_->lint();
// TODO: Might have some source left over, should check it if so.

View File

@@ -4,8 +4,8 @@
namespace torch::nativert {
-void selectScalarOverload(torch::nativert::Graph* graph);
+void selectScalarOverload(Graph* graph);
-std::string selectScalarOverloadName(const torch::nativert::Node& node);
+std::string selectScalarOverloadName(const Node& node);
} // namespace torch::nativert

View File

@@ -184,7 +184,7 @@ std::unique_ptr<Graph> jsonToSubgraph(
graphInputs = std::move(reorderedGraphInputs);
auto reorderedSignature = *signature;
reorderedSignature.set_input_specs(reorderedInputSpecs);
-graph->setSignature(torch::nativert::GraphSignature{reorderedSignature});
+graph->setSignature(GraphSignature{reorderedSignature});
}
for (const auto& input : graphInputs) {
@@ -408,7 +408,7 @@ std::unique_ptr<Graph> jsonToSubgraph(
}
sig.set_output_specs(std::move(outputSpecs));
-graph->setSignature(torch::nativert::GraphSignature{sig});
+graph->setSignature(GraphSignature{sig});
}
// weightsTensorMeta are indexed by weight's name, not graph input's name
@@ -462,7 +462,7 @@ Constant constantToValue(
bool loadNodeMetadata) {
switch (jsonArg.tag()) {
case torch::_export::Argument::Tag::AS_NONE:
-return torch::nativert::None();
+return None();
case torch::_export::Argument::Tag::AS_INT:
return jsonArg.get_as_int();
case torch::_export::Argument::Tag::AS_INTS: {
@@ -491,15 +491,13 @@ Constant constantToValue(
return ret;
}
case torch::_export::Argument::Tag::AS_SCALAR_TYPE:
-return torch::nativert::convertJsonScalarType(
-jsonArg.get_as_scalar_type());
+return convertJsonScalarType(jsonArg.get_as_scalar_type());
case torch::_export::Argument::Tag::AS_MEMORY_FORMAT:
-return torch::nativert::convertJsonMemoryFormat(
-jsonArg.get_as_memory_format());
+return convertJsonMemoryFormat(jsonArg.get_as_memory_format());
case torch::_export::Argument::Tag::AS_LAYOUT:
-return torch::nativert::convertJsonLayout(jsonArg.get_as_layout());
+return convertJsonLayout(jsonArg.get_as_layout());
case torch::_export::Argument::Tag::AS_DEVICE:
-return torch::nativert::convertJsonDevice(jsonArg.get_as_device());
+return convertJsonDevice(jsonArg.get_as_device());
case torch::_export::Argument::Tag::AS_BOOL:
return jsonArg.get_as_bool();
case torch::_export::Argument::Tag::AS_BOOLS: {

View File

@@ -4,8 +4,8 @@
#include <ATen/core/function_schema.h>
#include <c10/core/Device.h>
-#include <torch/nativert/executor/ExecutionFrame.h> // @manual
-#include <torch/nativert/executor/OpKernel.h> // @manual
+#include <torch/nativert/executor/ExecutionFrame.h>
+#include <torch/nativert/executor/OpKernel.h>
namespace torch::nativert {

View File

@@ -3,8 +3,8 @@
#include <c10/core/Device.h>
#include <torch/custom_class.h>
-#include <torch/nativert/executor/ExecutionFrame.h> // @manual
-#include <torch/nativert/executor/OpKernel.h> // @manual
+#include <torch/nativert/executor/ExecutionFrame.h>
+#include <torch/nativert/executor/OpKernel.h>
namespace torch::nativert {

View File

@@ -6,8 +6,6 @@
namespace torch::nativert {
-using torch::nativert::Graph;
HigherOrderKernel::HigherOrderKernel(
const Node* node,
std::vector<std::unique_ptr<GraphExecutorBase>> graphExecutors)

View File

@@ -17,10 +17,7 @@ namespace {
class OpKernel_prim_listpack : public OpKernel {
public:
explicit OpKernel_prim_listpack(const Node* node)
-: OpKernel(
-node,
-std::nullopt,
-torch::nativert::OpKernelKind::kPrimKernel) {
+: OpKernel(node, std::nullopt, OpKernelKind::kPrimKernel) {
auto listType = node->outputs()[0]->type();
switch (listType.kind()) {
case Type::Kind::TensorList:
@@ -79,10 +76,7 @@ namespace {
class OpKernel_variadic_concat : public OpKernel {
public:
explicit OpKernel_variadic_concat(const Node* node)
-: OpKernel(
-node,
-std::nullopt,
-torch::nativert::OpKernelKind::kPrimKernel) {
+: OpKernel(node, std::nullopt, OpKernelKind::kPrimKernel) {
dim_ = node_->attributes().size() > 0
? constantToIValue(node_->getAttribute("dim").value).toInt()
: 0;
@@ -127,10 +121,7 @@ namespace {
class OpKernel_variadic_stack : public OpKernel {
public:
explicit OpKernel_variadic_stack(const Node* node)
-: OpKernel(
-node,
-std::nullopt,
-torch::nativert::OpKernelKind::kPrimKernel) {
+: OpKernel(node, std::nullopt, OpKernelKind::kPrimKernel) {
dim_ = node_->attributes().size() > 0
? constantToIValue(node_->getAttribute("dim").value).toInt()
: 0;

View File

@@ -11,19 +11,16 @@ namespace torch::nativert {
TORCH_DECLARE_REGISTRY(PrimKernelRegistry, OpKernel, const Node*);
-#define REGISTER_PRIM_KERNEL(name, id, ...) \
-class OpKernel_##id : public OpKernel { \
-public: \
-OpKernel_##id(const Node* node) \
-: OpKernel( \
-node, \
-std::nullopt, \
-torch::nativert::OpKernelKind::kPrimKernel) {} \
-void computeInternal( \
-ExecutionFrame& executionFrame) const override final { \
-__VA_ARGS__; \
-} \
-}; \
+#define REGISTER_PRIM_KERNEL(name, id, ...) \
+class OpKernel_##id : public OpKernel { \
+public: \
+OpKernel_##id(const Node* node) \
+: OpKernel(node, std::nullopt, OpKernelKind::kPrimKernel) {} \
+void computeInternal( \
+ExecutionFrame& executionFrame) const override final { \
+__VA_ARGS__; \
+} \
+}; \
C10_REGISTER_TYPED_CLASS(PrimKernelRegistry, name, OpKernel_##id);
inline bool checkResizedDataPtr(at::Tensor& t) {