[3/N] Use internal linkage in C++ files (#151297)

Follows #151070.
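
For background: giving a symbol *internal linkage*, via `static` or an anonymous namespace, makes it private to its translation unit, which avoids accidental name collisions and lets the compiler prune or inline it more aggressively. A minimal sketch of the forms involved (illustrative only, not code from this commit):

```cpp
// helpers.cpp -- hypothetical file, for illustration only.

// External linkage (the default): visible to the linker from every
// translation unit, even if nothing outside this file uses it.
int addExternal(int a, int b) { return a + b; }

// Internal linkage via `static`: private to this file.
static int addStatic(int a, int b) { return a + b; }

// Internal linkage via an anonymous namespace -- the idiomatic C++
// form, usable for functions, classes, and variables alike.
namespace {
int addAnon(int a, int b) { return a + b; }
} // namespace
```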

Pull Request resolved: https://github.com/pytorch/pytorch/pull/151297
Approved by: https://github.com/Skylion007
Author: cyy
Date: 2025-05-05 17:48:34 +00:00
Committed by: PyTorch MergeBot
Parent: 99287b170b
Commit: 45efa1aaa8
14 changed files with 35 additions and 26 deletions

View File

@@ -16,8 +16,6 @@ using namespace torch;
 using namespace torch::jit;
 using namespace torch::jit::test;
-C10_DECLARE_bool(static_runtime_disable_debug_memory_overlap_check);
 namespace {
 StaticModule makeStaticModuleFromScript(const std::string& script) {

View File

@@ -9,6 +9,7 @@
 #include <mutex>
 #include <vector>
+#include "c10/util/Flags.h"
 #include "caffe2/core/common.h"
 //
@@ -65,4 +66,14 @@ class TORCH_API /*alignas(kCacheLineSize)*/ ThreadPool {
 size_t getDefaultNumThreads();
 } // namespace caffe2
+C10_DECLARE_bool(caffe2_threadpool_force_inline);
+// Whether or not threadpool caps apply to Android
+C10_DECLARE_int(caffe2_threadpool_android_cap);
+// Whether or not threadpool caps apply to iOS and MacOS
+C10_DECLARE_int(caffe2_threadpool_ios_cap);
+C10_DECLARE_int(caffe2_threadpool_macos_cap);
+C10_DECLARE_int(pthreadpool_size);
 #endif // CAFFE2_UTILS_THREADPOOL_H_
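
The `C10_DECLARE_*` lines added above pair with `C10_DEFINE_*` in a single .cc file; moving the declarations into the owning header gives every consumer one shared declaration instead of ad-hoc redeclarations. A rough sketch of the pattern, with a hypothetical flag name and default:

```cpp
#include <c10/util/Flags.h>

// Definition: exactly one .cc file does this. It creates the flag
// variable FLAGS_my_cap and registers it for command-line parsing.
C10_DEFINE_int(my_cap, 4, "Hypothetical cap, for illustration only.");

// Every other consumer includes a header carrying the matching
// declaration -- the kind of line this commit moves into ThreadPool.h:
//
//   C10_DECLARE_int(my_cap);
//
// and then reads FLAGS_my_cap (the exact namespace depends on whether
// the build uses gflags or c10's built-in flag implementation).
```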

View File

@@ -1,9 +1,8 @@
 #include <caffe2/utils/threadpool/pthreadpool-cpp.h>
 #include <caffe2/utils/threadpool/thread_pool_guard.h>
+#include <caffe2/utils/threadpool/ThreadPool.h>
 #include <c10/util/Exception.h>
 #include <atomic>
 namespace {
 // After fork, the child process inherits the data-structures of the parent
 // process' thread-pool, but since those threads don't exist, the thread-pool
@@ -102,9 +101,6 @@ PThreadPool* pthreadpool(size_t thread_count) {
   return threadpool.get();
 }
-// Forward declaration
-size_t getDefaultNumThreads();
 PThreadPool* pthreadpool() {
   return pthreadpool(getDefaultNumThreads());
 }

View File

@@ -2,7 +2,7 @@
 namespace caffe2 {
-thread_local bool _NoPThreadPoolGuard_enabled = false;
+static thread_local bool _NoPThreadPoolGuard_enabled = false;
 bool _NoPThreadPoolGuard::is_enabled() {
   return _NoPThreadPoolGuard_enabled;

View File

@@ -272,7 +272,7 @@ PY_RAW_GETSETDEF_STRUCT = CodeTemplate(
 # Getter templates
 GETTER_DEFINITION = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   auto prop = static_cast<${op}*>(self->cdata.get())->${name};
   ${body}
@@ -283,7 +283,7 @@ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
 GETTER_DEFINITION_SAVEDVAR = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
   ${body}
@@ -294,7 +294,7 @@ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
 GETTER_DEFINITION_RAW_SAVEDVAR = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
   ${body}
@@ -305,7 +305,7 @@ PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
 GETTER_DEFINITION_VEC_SAVEDVAR = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   const auto *node = static_cast<${op}*>(self->cdata.get());
   const auto& prop = node->${name}_;
@@ -321,7 +321,7 @@ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
 GETTER_DEFINITION_RAW_VEC_SAVEDVAR = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   const auto *node = static_cast<${op}*>(self->cdata.get());
   const auto& prop = node->${name}_;
@@ -337,7 +337,7 @@ PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
 GETTER_DEFINITION_OPT = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
   if (!opt_prop.has_value()) {
@@ -352,7 +352,7 @@ PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
 GETTER_DEFINITION_OPT_ARRAYREF = CodeTemplate(
     """\
-PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
   if (!opt_prop.list.has_value()) {
@@ -832,7 +832,7 @@ def process_function(info: DifferentiabilityInfo, template: CodeTemplate) -> str
         getter_definitions.append(
             CodeTemplate(
                 """\
-PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
+static PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
   HANDLE_TH_ERRORS
   const auto *node = static_cast<${op}*>(self->cdata.get());
   const auto& prop = node->${name};

View File

@@ -842,7 +842,7 @@ static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyO
   END_HANDLE_TH_ERRORS
 }
-inline bool dispatch_is_contiguous(const Tensor & self, MemoryFormat memory_format) {
+static inline bool dispatch_is_contiguous(const Tensor & self, MemoryFormat memory_format) {
   return self.is_contiguous(memory_format);
 }

View File

@@ -176,5 +176,5 @@ struct TORCH_API GraphFunction : public Function {
 TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
 TORCH_API GraphFunction& toGraphFunction(Function&);
 TORCH_API const GraphFunction& toGraphFunction(const Function&);
 } // namespace torch::jit
+C10_DECLARE_bool(torch_jit_do_not_store_optimized_graph);

View File

@@ -18,8 +18,6 @@
 namespace torch::jit {
 namespace mobile {
-char const* toString(OpCode op);
 namespace {
 /**

View File

@@ -163,7 +163,8 @@ std::optional<IValue> toTypeInferredIValueOptional(py::handle input) {
 }
 } // anonymous namespace
-#if !defined(USE_ROCM)
+#if defined(BUILDING_TESTS) && !defined(USE_ROCM)
+// NOLINTNEXTLINE(misc-use-internal-linkage)
 TORCH_API void runJITCPPTests();
 #endif
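
The `NOLINTNEXTLINE` added here suppresses clang-tidy's `misc-use-internal-linkage`, the check that appears to motivate this series. Roughly, it flags symbols that are defined in a .cpp, declared in no visible header, and yet externally visible; a sketch (not from this commit):

```cpp
// example.cpp -- illustration of what the check reports.

// Flagged: external linkage, but no other translation unit that
// clang-tidy can see declares or uses it.
void localHelper() {}

// Accepted: the anonymous namespace gives it internal linkage.
namespace {
void localHelperAlt() {}
} // namespace
```

`runJITCPPTests()` above is a local declaration of a function presumably defined in a test-only translation unit (hence the new BUILDING_TESTS guard), so the warning is suppressed rather than "fixed".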

View File

@@ -7,6 +7,11 @@ TORCH_DECLARE_bool(torch_jit_static_then_dynamic);
 TORCH_DECLARE_bool(torch_jit_always_dynamic);
+C10_DECLARE_bool(torch_jit_release_profiling_graph_after_optimization);
+C10_DECLARE_int32(torch_jit_release_profiling_graph_delay_in_seconds);
+C10_DECLARE_int64(torch_jit_num_profiled_runs);
+C10_DECLARE_int64(torch_jit_bailout_depth);
 namespace torch::jit {
 TORCH_API void runNooptPassPipeline(std::shared_ptr<Graph>& graph);

View File

@@ -1145,3 +1145,4 @@ class TORCH_API StaticRuntime {
 };
 } // namespace torch::jit
+C10_DECLARE_bool(static_runtime_disable_debug_memory_overlap_check);

View File

@@ -182,5 +182,6 @@ bool sr_schema_check(
 }
 bool sr_schema_check_kind(torch::jit::Node* node, c10::Symbol node_kind);
 } // namespace torch::jit
+C10_DECLARE_bool(static_runtime_enable_fast_math);

View File

@@ -86,4 +86,5 @@ TORCH_API void UseInPlaceGetRealInputsFromOptionalInputsV2(
 TORCH_API void PrepackWeights(std::shared_ptr<Graph>& graph);
+C10_DECLARE_bool(enable_clip_ranges_gather_fusions);
 } // namespace torch::jit

View File

@@ -129,12 +129,9 @@ UPGRADER_CPP_SRC = CodeTemplate(
     MOBILE_UPGRADERS_HEADER_DESCRIPTION
     + """
 #include <caffe2/serialize/versions.h>
+#include <torch/csrc/jit/mobile/type_parser.h>
 #include <torch/csrc/jit/mobile/upgrader_mobile.h>
-namespace c10 {
-TypePtr parseType(const std::string& pythonStr);
-} // namespace c10
 namespace torch {
 namespace jit {