Enable Leak Sanitizer (#154584)

This change turns on Leak Sanitizer (LSAN) in the ASAN CI build and adds a suppression file (lsan.supp) for known, hard-to-solve indirect leaks.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/154584
Approved by: https://github.com/ezyang
Author: cyy
Committed by: PyTorch MergeBot
Date: 2025-06-23 05:20:23 +00:00
Parent: 9fed2added
Commit: c79c7bbe61
6 changed files with 41 additions and 7 deletions
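
For readers unfamiliar with LeakSanitizer: with detect_leaks=1 the ASAN runtime scans the heap when the process exits and reports every allocation that is no longer reachable, together with its allocation stack. The sketch below is illustrative only and not part of this commit; the file and function names are made up, and it assumes a Clang or GCC build with -fsanitize=address -g.

    // leak_demo.cc -- hypothetical example of the defect class LSAN reports.
    // Build: clang++ -fsanitize=address -g leak_demo.cc -o leak_demo
    // Run:   ASAN_OPTIONS=detect_leaks=1 ./leak_demo
    // Expected report: "Direct leak of 400 byte(s) in 1 object(s)" plus the
    // stack trace of the new[] below.
    #include <cstdio>

    void leak_buffer() {
      int* data = new int[100];
      data[0] = 42;
      std::printf("first element: %d\n", data[0]);
      // no delete[]: once `data` goes out of scope the block is unreachable
    }

    int main() {
      leak_buffer();
      return 0;  // the leak check runs after main returns
    }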


@@ -216,11 +216,13 @@ fi
# if you're not careful. Check this if you made some changes and the
# ASAN test is not working
if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
export ASAN_OPTIONS=detect_leaks=0:symbolize=1:detect_stack_use_after_return=true:strict_init_order=true:detect_odr_violation=1:detect_container_overflow=0:check_initialization_order=true:debug=true
export ASAN_OPTIONS=detect_leaks=1:symbolize=1:detect_stack_use_after_return=true:strict_init_order=true:detect_odr_violation=1:detect_container_overflow=0:check_initialization_order=true:debug=true:fast_unwind_on_malloc=1
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
export ASAN_OPTIONS="${ASAN_OPTIONS}:protect_shadow_gap=0"
fi
export UBSAN_OPTIONS=print_stacktrace=1:suppressions=$PWD/ubsan.supp
# Suppress some hard to solve indirect leaks
export LSAN_OPTIONS="suppressions=$PWD/lsan.supp"
export PYTORCH_TEST_WITH_ASAN=1
export PYTORCH_TEST_WITH_UBSAN=1
# TODO: Figure out how to avoid hard-coding these paths
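
The colon-separated ASAN_OPTIONS/LSAN_OPTIONS strings above are parsed by the sanitizer runtime at startup, and the suppression file named in LSAN_OPTIONS is consulted when the leak check runs at process exit. As an aside (this commit relies only on the environment variables), equivalent defaults can also be baked into a binary through the runtime's weak hook functions; a minimal sketch, assuming Clang's compiler-rt or GCC's libasan:

    #include <sanitizer/lsan_interface.h>

    // Weak hooks the sanitizer runtime calls at startup; values set via the
    // ASAN_OPTIONS / LSAN_OPTIONS environment variables take precedence.
    extern "C" const char* __asan_default_options() {
      // A subset of the flags exported in the CI script above.
      return "detect_leaks=1:symbolize=1:detect_stack_use_after_return=true";
    }

    extern "C" const char* __lsan_default_suppressions() {
      // Same syntax as lsan.supp: one "leak:<pattern>" rule per line.
      return "leak:pybind11::cpp_function\nleak:PyObject_Malloc\n";
    }

    int main() { return 0; }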

lsan.supp (new file, 31 lines)

@@ -0,0 +1,31 @@
leak:pybind11::cpp_function
leak:PyMem_RawMalloc
leak:unicode_resize
leak:PyObject_Malloc
leak:PyByteArray_Resize
leak:numpy
leak:list_append
leak:unicodeobject
leak:obmalloc
leak:gcmodule
leak:listobject
leak:bytesobject
leak:PyThread_allocate_lock
leak:sccache
leak:rustc-1.61.0
leak:gcc/x86_64-linux-gnu/11
leak:x86_64-linux-gnu-gcc-11
leak:libbfd
leak:x86_64-linux-gnu-ld.bfd
leak:git
leak:libio
leak:unknown module
leak:g++
leak:conda-linux-gnu-ld
leak:crypto
leak:torch::detail::(anonymous namespace)::get_set_cached_attr
leak:torch::jit::tensorexpr::TensorExprKernel::preAllocIntermediateBufs
leak:optree
leak:python
leak:torch::tensors::initialize_aten_types
leak:libclang_rt
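
Each leak:<pattern> line above is a substring match against the function names, source files, and module names that appear in a reported leak's stack, so broad entries such as leak:python or leak:numpy silence whole families of reports. For leaks that are easier to identify in code than by symbol name, the runtime also offers a programmatic escape hatch; a hedged sketch, assuming the program is built with -fsanitize=address so the LSAN runtime and <sanitizer/lsan_interface.h> symbols are available:

    #include <sanitizer/lsan_interface.h>

    #include <cstring>

    int main() {
      // Option 1: mark a single allocation as intentionally leaked.
      char* banner = new char[64];
      std::strcpy(banner, "lives for the whole process");
      __lsan_ignore_object(banner);  // LSAN will not report this block

      // Option 2: disable leak tracking around a region of code.
      __lsan_disable();
      int* scratch = new int[16];  // allocations made here are never reported
      (void)scratch;
      __lsan_enable();

      return 0;
    }

The file-based suppressions used here are the better fit for CI: they require no source changes and can name third-party frames such as sccache, libbfd, or rustc.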


@@ -23,6 +23,7 @@ from torch.testing._internal.common_utils import (
find_free_port,
munge_exc,
skipIfTorchDynamo,
TEST_WITH_ASAN,
xfailIfS390X,
)
from torch.testing._internal.inductor_utils import HAS_CUDA
@@ -524,6 +525,7 @@ LoweringException: AssertionError:
with self.assertRaises(ValueError):
torch._logging.set_logs(aot_graphs=5)
@unittest.skipIf(TEST_WITH_ASAN, "LSAN outputs suppression report on stderr")
def test_invalid_artifact_flag_error_msg(self):
env = dict(os.environ)
env["TORCH_LOGS"] = "not_an_existing_log_artifact_should_error"
@@ -856,6 +858,7 @@ TRACE FX call mul from test_logging.py:N in fn (LoggingTests.test_trace_call_pre
# there are some additional deprecation warnings in stderr, probably due to newer dependencies used on s390x
@xfailIfS390X
@unittest.skipIf(TEST_WITH_ASAN, "LSAN outputs suppression report on stderr")
def test_logs_out(self):
import tempfile
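
Both skips added in this file exist because these tests compare a subprocess's stderr against an expected string, and a leak-checked process whose leaks match a suppression file appends a "Suppressions used:" summary to stderr by default (the common sanitizer flag print_suppressions controls this). A sketch of that behavior with hypothetical names, again assuming an ASAN build:

    // suppressed_leak.cc -- illustrative only, not part of this commit.
    // Build: clang++ -fsanitize=address -g suppressed_leak.cc -o demo
    // Run:   LSAN_OPTIONS=suppressions=my.supp ./demo 2>stderr.txt
    // If my.supp contains a hypothetical rule "leak:make_noise", the process
    // exits 0, but stderr.txt still ends with a block roughly like:
    //   Suppressions used:
    //     count      bytes template
    //         1        256 make_noise
    // which is exactly the extra stderr output the skipped tests would trip
    // over. Adding print_suppressions=0 to LSAN_OPTIONS silences the summary.
    #include <cstdio>

    void make_noise() {
      char* p = new char[256];  // leaked on purpose; matched by the rule above
      std::printf("%p\n", static_cast<void*>(p));
    }

    int main() {
      make_noise();
      return 0;
    }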


@@ -235,7 +235,7 @@ static PyObject* THPModule_initExtension(
END_HANDLE_TH_ERRORS
}
// The idea behind these two functions is to make it easy to test if we are
// The idea behind these functions is to make it easy to test if we are
// built with ASAN: they're designed not to crash if ASAN is not enabled, but
// to trigger ASAN if it is enabled. This lets us run a "canary" test which
// checks if our build environment is misconfigured.
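
The canary helpers this comment refers to are PyTorch's own; the snippet below is only an illustrative stand-in for the pattern it describes: an out-of-bounds access that usually goes unnoticed in an uninstrumented build but is reported as heap-buffer-overflow under ASAN, letting a test confirm the sanitizer runtime is really active. Names are hypothetical.

    #include <cstdio>

    // Hypothetical canary, not the actual PyTorch implementation.
    int asan_canary() {
      int* buf = new int[4];
      for (int i = 0; i < 4; ++i) buf[i] = i;
      // Reading one element past the end usually lands in allocator padding
      // and "works" without ASAN; with ASAN it aborts with heap-buffer-overflow.
      volatile int out = buf[4];
      delete[] buf;
      return out;
    }

    int main() {
      std::printf("canary returned %d\n", asan_canary());
      return 0;
    }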


@@ -456,7 +456,7 @@ static Tensor detach(c10::DispatchKeySet ks, const Tensor& self) {
// NB: we can't make detach() a normal view operator because the codegen
// generates allow_tensor_metadata_change = True for them. In the future we
// should have an option for this in the codegen.
auto result = as_view(
return as_view(
/* base */ self,
/* output */ out,
/* is_bw_differentiable */ false,
@@ -465,8 +465,6 @@ static Tensor detach(c10::DispatchKeySet ks, const Tensor& self) {
/* rev_view_func */ nullptr,
/* creation_meta */ CreationMeta::DEFAULT,
/*allow_tensor_metadata_change=*/false);
return result;
}
static Tensor _fw_primal(
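
The detach() edit above replaces "auto result = as_view(...); ... return result;" with returning the call expression directly. Since C++17 a returned prvalue is constructed straight into the caller's storage (guaranteed copy elision), whereas returning a named local relies on NRVO, which is common but not guaranteed. A toy sketch of the difference; Handle, make, via_local, and direct are made-up names for illustration:

    #include <cstdio>

    // Instrumented handle type that announces copies and moves.
    struct Handle {
      Handle() { std::puts("construct"); }
      Handle(const Handle&) { std::puts("copy"); }
      Handle(Handle&&) noexcept { std::puts("move"); }
    };

    Handle make() { return Handle(); }

    // Style before the change: name the result, then return the name.
    Handle via_local() {
      Handle result = make();
      return result;  // NRVO or a move, depending on the compiler
    }

    // Style after the change: return the prvalue directly.
    Handle direct() {
      return make();  // constructed in the caller's storage, no extra object
    }

    int main() {
      std::puts("-- via_local --");
      Handle a = via_local();
      std::puts("-- direct --");
      Handle b = direct();
      (void)a; (void)b;
      return 0;
    }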


@@ -844,7 +844,7 @@ inline Variable make_variable_non_differentiable_view(
/*version_counter=*/impl::version_counter(base),
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(nullptr);
return Variable(data_impl_copy);
return Variable(std::move(data_impl_copy));
}
return Variable();
}
@@ -903,7 +903,7 @@ inline Variable make_variable(
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(std::make_unique<AutogradMeta>(
data_impl_copy.get(), false, std::move(gradient_edge)));
return Variable(data_impl_copy);
return Variable(std::move(data_impl_copy));
}
return Variable();
}
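
In both make_variable overloads, data_impl_copy is a reference-counted handle (a c10::intrusive_ptr in PyTorch) that is not used again after the return, so moving it into the Variable constructor transfers the existing reference instead of doing an atomic increment that the local's destructor immediately undoes. A sketch of the same idea with std::shared_ptr standing in for the intrusive pointer; Impl, Wrapper, and the factory functions are made-up names:

    #include <cstdio>
    #include <memory>
    #include <utility>

    struct Impl {
      int value = 0;
    };

    // Stand-in for Variable/Tensor: a thin wrapper that takes its impl by value.
    struct Wrapper {
      std::shared_ptr<Impl> impl;
      explicit Wrapper(std::shared_ptr<Impl> i) : impl(std::move(i)) {}
    };

    Wrapper make_wrapper_copy() {
      auto impl_copy = std::make_shared<Impl>();
      // Copies the handle: one atomic increment here, one decrement when
      // impl_copy is destroyed at the end of this function.
      return Wrapper(impl_copy);
    }

    Wrapper make_wrapper_move() {
      auto impl_copy = std::make_shared<Impl>();
      // Transfers the existing reference: no refcount traffic at all.
      return Wrapper(std::move(impl_copy));
    }

    int main() {
      Wrapper a = make_wrapper_copy();
      Wrapper b = make_wrapper_move();
      std::printf("use counts: %ld %ld\n", a.impl.use_count(), b.impl.use_count());
      return 0;
    }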