mirror of https://github.com/pytorch/pytorch.git
restore hidden visibility by default for Linux builds (#20461)
Summary: Symbols are given hidden visibility by default on Linux to emulate the behavior on Windows. This helps developers catch visibility issues in their streamlined Linux dev environment before being surprised, late in the process, by Windows errors.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/20461
Reviewed By: kostmo
Differential Revision: D15410410
Pulled By: dzhulgakov
fbshipit-source-id: 1d684b5a9a80b692966a775c3f1c56b7c72ffc95
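Background for the change: on Windows, a DLL's symbols are invisible to other modules unless explicitly exported with __declspec(dllexport), while on Linux every symbol is exported by default. Compiling Linux builds with -fvisibility=hidden makes a missing export annotation fail on Linux in roughly the same places it would fail on Windows. Below is a minimal sketch of an export macro in this spirit; MYLIB_API and BUILDING_MYLIB are illustrative stand-ins, not PyTorch's actual macro definitions (the real ones sit behind TORCH_API in torch/csrc/WindowsTorchApiMacro.h, which appears in the hunks below).

// Illustrative export macro, roughly in the spirit of TORCH_API.
// BUILDING_MYLIB is a hypothetical define set only while compiling the library.
#if defined(_WIN32)
#  if defined(BUILDING_MYLIB)
#    define MYLIB_API __declspec(dllexport)
#  else
#    define MYLIB_API __declspec(dllimport)
#  endif
#else
// With -fvisibility=hidden on the compile line, only symbols explicitly
// marked "default" are visible to other shared objects.
#  define MYLIB_API __attribute__((visibility("default")))
#endif

class MYLIB_API Exported {};  // callable from outside the library
class Internal {};            // hidden when built with -fvisibility=hidden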
committed by Facebook Github Bot
parent be1f83c350
commit 0bfc0eeef7
@@ -29,11 +29,11 @@ export ASAN_OPTIONS=detect_leaks=0:symbolize=1
 # [2] https://wiki.gentoo.org/wiki/AddressSanitizer/Problems
 # [3] https://github.com/Kitware/CMake/commit/e9a1ddc594de6e6251bf06d732775dae2cabe4c8
 #
-# TODO: Make the ASAN flags a more unified env var
+# TODO: Make the ASAN flags a centralized env var and unify with USE_ASAN option
 CC="clang" CXX="clang++" LDSHARED="clang --shared" \
   CFLAGS="-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -shared-libasan -pthread" \
   CXX_FLAGS="-pthread" \
-  NO_CUDA=1 USE_MKLDNN=0 \
+  USE_ASAN=1 NO_CUDA=1 USE_MKLDNN=0 \
   python setup.py install

 assert_git_not_dirty
@@ -818,9 +818,15 @@ if(USE_ROCM)
   endif()


-if (NOT WIN32)
-  # FIXME kostmo
-  # target_compile_options(caffe2 PRIVATE "-fvisibility=hidden")
+if (NOT WIN32 AND NOT USE_ASAN)
+  # Enable hidden visibility by default to make it easier to debug issues with
+  # TORCH_API annotations. Hidden visibility with selective default visibility
+  # behaves close enough to Windows' dllimport/dllexport.
+  #
+  # Unfortunately, hidden visibility messes up some ubsan warnings because
+  # templated classes crossing library boundary get duplicated (but identical)
+  # definitions. It's easier to just disable it.
+  target_compile_options(caffe2 PRIVATE "-fvisibility=hidden")
 endif()

@@ -31,6 +31,7 @@
 #include <test/cpp/jit/test_qualified_name.h>
 #include <test/cpp/jit/test_subgraph_matcher.h>
 #include <test/cpp/jit/test_subgraph_utils.h>
+#include <torch/csrc/WindowsTorchApiMacro.h>

 using namespace torch::jit::script;
 using namespace torch::jit::test;
@@ -110,7 +111,7 @@ TH_FORALL_TESTS_CUDA(JIT_GTEST_CUDA)
 #endif

 #define JIT_TEST(name) test##name();
-void runJITCPPTests(bool runCuda) {
+TORCH_API void runJITCPPTests(bool runCuda) {
   TH_FORALL_TESTS(JIT_TEST)
   if (runCuda) {
     TH_FORALL_TESTS_CUDA(JIT_TEST)
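Why a plain free function picks up TORCH_API here: runJITCPPTests is presumably invoked from the Python extension, a separate shared object, so once libtorch is built with hidden visibility the symbol must be exported explicitly or the caller fails to resolve it. A one-declaration sketch reusing the illustrative MYLIB_API macro from above:

// MYLIB_API as sketched earlier; define it empty for a standalone compile.
#ifndef MYLIB_API
#define MYLIB_API
#endif

// Header shared between the library and its callers. Without the annotation,
// a -fvisibility=hidden build keeps this symbol out of the dynamic symbol
// table and the calling DSO fails with "undefined symbol" at link/load time.
MYLIB_API void runSelfTests(bool withCuda);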
@@ -211,6 +211,7 @@ def run_cmake(version,
         USE_REDIS=os.getenv('USE_REDIS'),
         USE_GLOG=os.getenv('USE_GLOG'),
         USE_GFLAGS=os.getenv('USE_GFLAGS'),
+        USE_ASAN=check_env_flag('USE_ASAN'),
         WERROR=os.getenv('WERROR'))

     if os.getenv('_GLIBCXX_USE_CXX11_ABI'):
@@ -78,7 +78,7 @@ struct ConvOptions {

 /// Base class for all (dimension-specialized) convolution modules.
 template <size_t D, typename Derived>
-class ConvImpl : public torch::nn::Cloneable<Derived> {
+class TORCH_API ConvImpl : public torch::nn::Cloneable<Derived> {
  public:
   ConvImpl(
       int64_t input_channels,
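Note that the export macro lands on a class template here. On GCC/Clang, the visibility attribute carries over to every instantiation, so instantiations stay reachable across the shared-object boundary under -fvisibility=hidden; on MSVC, dllexport on a template only takes effect for explicit instantiations. A small illustrative sketch (MYLIB_API as above, class name hypothetical):

#ifndef MYLIB_API
#define MYLIB_API
#endif

// CRTP base template whose instantiations keep default visibility even when
// the library is compiled with -fvisibility=hidden.
template <typename Derived>
class MYLIB_API CloneableBase {
 public:
  Derived* self() { return static_cast<Derived*>(this); }
};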
@@ -40,7 +40,7 @@ class DropoutImplBase : public torch::nn::Cloneable<Derived> {
 /// about the exact semantics of this module.
 class TORCH_API DropoutImpl : public detail::DropoutImplBase<DropoutImpl> {
  public:
-  using detail::DropoutImplBase<DropoutImpl>::DropoutImplBase;
+  explicit DropoutImpl(DropoutOptions options_ = DropoutOptions());

   /// During training, applies a noise mask to the input tensor.
   /// During evaluation, applies an identity function.
@@ -62,7 +62,7 @@ class TORCH_API DropoutImpl : public detail::DropoutImplBase<DropoutImpl> {
 class TORCH_API FeatureDropoutImpl
     : public detail::DropoutImplBase<FeatureDropoutImpl> {
  public:
-  using detail::DropoutImplBase<FeatureDropoutImpl>::DropoutImplBase;
+  explicit FeatureDropoutImpl(DropoutOptions options_ = DropoutOptions());

   /// During training, applies a noise mask to the input tensor.
   /// During evaluation, applies an identity function.
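This hunk trades inherited constructors (using ...::DropoutImplBase) for explicitly declared ones; the dropout.cpp hunk further down supplies the out-of-line definitions. A plausible reading is that the exported classes should own a concrete, library-defined constructor symbol rather than implicitly inline inherited ones. A compilable miniature of the pattern, with hypothetical names:

// CRTP base holding the real construction logic.
template <typename Derived>
struct ImplBase {
  explicit ImplBase(double rate) : rate(rate) {}
  double rate;
};

// Exported class: the constructor is declared in the header...
struct Impl : ImplBase<Impl> {
  explicit Impl(double rate = 0.5);
};

// ...and defined once in the library's .cpp, yielding an exportable symbol.
Impl::Impl(double rate) : ImplBase(rate) {}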
@@ -53,7 +53,7 @@ struct TORCH_API RNNOptionsBase {

 /// Base class for all RNN implementations (intended for code sharing).
 template <typename Derived>
-class RNNImplBase : public torch::nn::Cloneable<Derived> {
+class TORCH_API RNNImplBase : public torch::nn::Cloneable<Derived> {
  public:
   /// These must line up with the CUDNN mode codes:
   /// https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnRNNMode_t
@@ -27,6 +27,8 @@ template class DropoutImplBase<FeatureDropoutImpl>;

 DropoutOptions::DropoutOptions(double rate) : rate_(rate) {}

+DropoutImpl::DropoutImpl(DropoutOptions options_) : DropoutImplBase(options_) {}
+
 Tensor DropoutImpl::forward(const Tensor& input) {
   return torch::dropout(input, options.rate_, this->is_training());
 }
@@ -35,6 +37,9 @@ void DropoutImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::Dropout(rate=" << options.rate_ << ")";
 }

+FeatureDropoutImpl::FeatureDropoutImpl(DropoutOptions options_)
+    : DropoutImplBase(options_) {}
+
 Tensor FeatureDropoutImpl::forward(const Tensor& input) {
   return torch::feature_dropout(input, options.rate_, this->is_training());
 }
@@ -27,6 +27,8 @@ std::list<std::shared_ptr<RangeEventList>> all_event_lists;
 thread_local std::shared_ptr<RangeEventList> event_list;
 thread_local uint16_t thread_id;

+ProfilerConfig::~ProfilerConfig() = default;
+
 RangeEventList& getEventList() {
   if (!event_list) {
     std::lock_guard<std::mutex> guard(all_event_lists_mutex);
@@ -101,9 +101,10 @@ enum class TORCH_API ProfilerState {
   NVTX, // only emit NVTX markers
 };

-struct ProfilerConfig {
+struct TORCH_API ProfilerConfig {
   ProfilerConfig(ProfilerState state, bool report_input_shapes)
       : state(state), report_input_shapes(report_input_shapes) {}
+  ~ProfilerConfig();
   ProfilerState state;
   bool report_input_shapes;
 };
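The destructor declared here is the one defined = default in the profiler.cpp hunk above. Declaring it in the header while defining it in exactly one translation unit anchors the destructor symbol inside the library, instead of leaving an implicitly inline destructor for every consumer to emit; that matters once the struct carries TORCH_API. A minimal sketch of the same pattern (names hypothetical):

#ifndef MYLIB_API
#define MYLIB_API
#endif

// config.h
struct MYLIB_API Config {
  explicit Config(int level) : level(level) {}
  ~Config();  // declared, not defined, in the header
  int level;
};

// config.cpp, inside the library
Config::~Config() = default;  // the single definition the library exports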
@@ -2,6 +2,7 @@
 #include <torch/csrc/jit/constants.h>
 #include <torch/csrc/jit/ir.h>
 #include <torch/csrc/jit/operator.h>
+#include <torch/csrc/jit/passes/requires_grad_analysis.h>

 #include <vector>

@@ -29,23 +29,30 @@ struct Value;
 //
 // So, by traversing the "points-to" graph to the leaves, you can determine
 // which memory locations an element may point to.
-class MemoryDAG {
+class TORCH_API MemoryDAG {
  public:
+  // explicitly delete copy constructor because otherwise windows build is confused for an exported class
+  // see https://stackoverflow.com/a/51033485/105137
+  MemoryDAG() {}
+  MemoryDAG(const MemoryDAG&) = delete;
+  MemoryDAG& operator=(const MemoryDAG&) = delete;
+
   // Make `from` point at `to`.
-  TORCH_API void makePointerTo(Element* from, Element* to);
+  void makePointerTo(Element* from, Element* to);

   void addToContainedElements(Element* contained, Element* container);

   // Make a fresh element (i.e. an element that doesn't point to anything) and
   // return it.
-  TORCH_API Element* makeFreshValue(const Value* v);
+  Element* makeFreshValue(const Value* v);

   // Do `a` and `b` potentially share a memory location?
   bool mayAlias(const Element* a, const Element* b) const;
-  TORCH_API bool mayAlias(Element* a, Element* b) const;
+  bool mayAlias(Element* a, Element* b) const;

   // Does a hold reference to any memory that is stored in elem, or vice versa?
-  TORCH_API bool mayContainAlias(const Element* a, const Element* b) const;
+  bool mayContainAlias(const Element* a, const Element* b) const;
   bool mayContainAlias(Element* a, Element* b) const;

   bool mayContainAlias(
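A gloss on the deleted copy operations: __declspec(dllexport) on a class makes MSVC instantiate every member function, including the implicit copy constructor, and that instantiation cannot compile when the class holds a move-only member such as a std::unique_ptr (the Stack Overflow answer linked in the hunk describes exactly this). Explicitly deleting the copy operations removes them from instantiation. A self-contained sketch with hypothetical names:

#include <memory>
#include <vector>

struct Node {};  // stand-in for MemoryDAG's Element type

// On Windows this class would carry the export macro; dllexport forces MSVC
// to instantiate every special member, and the implicit copy constructor
// cannot be generated for a vector of unique_ptr. Deleting the copy
// operations removes them from consideration.
class /* MYLIB_API */ Dag {
 public:
  Dag() = default;
  Dag(const Dag&) = delete;
  Dag& operator=(const Dag&) = delete;

 private:
  std::vector<std::unique_ptr<Node>> nodes_;  // move-only member
};

int main() {
  Dag d;  // default construction still works; copying is a compile error
  (void)d;
}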
@@ -1,2 +1,3 @@
 vptr:libtorch.so
+vptr:libcaffe2.so
 bounds:asmjit::Zone::_alloc