mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 12:54:11 +08:00
Fix periodic debug tests failing due to FakeProcessGroup things (#165479)
These happen when building with CMAKE_BUILD_TYPE=RelWithAssert. This should fix two types of failures that started with https://github.com/pytorch/pytorch/pull/163665. Disclaimer that I used a lot of AI since I don't know how pybind works or what refcounts and pointers are, so idk if this is a good solution, or even a solution at all (fwiw the tests pass now) The first type is Truncated: ``` default_pg, _ = _new_process_group_helper( File "/opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 2096, in _new_process_group_helper backend_class = creator_fn(dist_backend_opts, backend_options) File "/opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py", line 25, in _create_fake_pg return FakeProcessGroup._create_internal( RuntimeError: new_refcount != 1 INTERNAL ASSERT FAILED at "/var/lib/jenkins/workspace/c10/util/intrusive_ptr.h":319, please report a bug to PyTorch. intrusive_ptr: Cannot increase refcount after it reached zero. 
Exception raised from retain_ at /var/lib/jenkins/workspace/c10/util/intrusive_ptr.h:319 (most recent call first): C++ CapturedTraceback: #4 std::_Function_handler<std::shared_ptr<c10::LazyValue<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > const> (), c10::SetStackTraceFetcher(std::function<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > ()>)::{lambda()#1}>::_M_invoke(std::_Any_data const&) from Logging.cpp:0 #5 c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) from ??:0 #6 c10::detail::torchCheckFail(char const*, char const*, unsigned int, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) from ??:0 #7 c10::detail::torchInternalAssertFail(char const*, char const*, unsigned int, char const*, char const*) from ??:0 #8 void pybind11::class_<c10d::FakeProcessGroup, (anonymous namespace)::IntrusivePtrNoGilDestructor<c10d::FakeProcessGroup> >::init_instance<(anonymous namespace)::IntrusivePtrNoGilDestructor<c10d::FakeProcessGroup>, 0>(pybind11::detail::instance*, void const*) from init.cpp:0 #9 pybind11::detail::type_caster_generic::cast(void const*, pybind11::return_value_policy, pybind11::handle, pybind11::detail::type_info const*, void* (*)(void const*), void* (*)(void const*), void const*) from :0 #10 pybind11::cpp_function::initialize<torch::distributed::c10d::(anonymous namespace)::c10d_init(_object*, _object*)::{lambda(int, int, c10::intrusive_ptr<c10d::FakeProcessGroup::Options, c10::detail::intrusive_target_default_null_type<c10d::FakeProcessGroup::Options> >)#127}, c10::intrusive_ptr<c10d::FakeProcessGroup, c10::detail::intrusive_target_default_null_type<c10d::FakeProcessGroup> >, int, int, c10::intrusive_ptr<c10d::FakeProcessGroup::Options, c10::detail::intrusive_target_default_null_type<c10d::FakeProcessGroup::Options> >, pybind11::name, pybind11::scope, pybind11::sibling, 
pybind11::arg, pybind11::arg, pybind11::arg_v>(torch::distributed::c10d::(anonymous namespace)::c10d_init(_object*, _object*)::{lambda(int, int, c10::intrusive_ptr<c10d::FakeProcessGroup::Options, c10::detail::intrusive_target_default_null_type<c10d::FakeProcessGroup::Options> >)#127}&&, c10::intrusive_ptr<c10d::FakeProcessGroup, c10::detail::intrusive_target_default_null_type<c10d::FakeProcessGroup> > (*)(int, int, c10::intrusive_ptr<c10d::FakeProcessGroup::Options, c10::detail::intrusive_target_default_null_type<c10d::FakeProcessGroup::Options> >), pybind11::name const&, pybind11::scope const&, pybind11::sibling const&, pybind11::arg const&, pybind11::arg const&, pybind11::arg_v const&)::{lambda(pybind11::detail::function_call&)#3}::_FUN(pybind11::detail::function_call&) from init.cpp:0 ``` and I fix it here by getting rid of `DontIncreaseRefcount` and using make_intrusive to do the ref count handling instead. However, I also had to move the constructor to be public, which I think is not good, based on the reasoning of the original PR The other one type is ``` Traceback (most recent call last): File "/var/lib/jenkins/workspace/test/test_testing.py", line 2415, in test_no_warning_on_import self.assertEqual(out, "") File "/opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py", line 4233, in assertEqual raise error_metas.pop()[0].to_error( # type: ignore[index] AssertionError: String comparison failed: "/opt/conda/envs/py_3.10/lib/python3.10/s[352 chars]):\n" != '' - /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/distributed/__init__.py:29: FutureWarning: pybind11-bound class 'torch._C._distributed_c10d.FakeProcessGroup' is using an old-style placement-new '__init__' which has been deprecated. See the upgrade guide in pybind11's docs. This message is only visible when compiled in debug mode. 
- if is_available() and not torch._C._c10d_init(): To execute this test, run the following from the base repo dir: python test/test_testing.py TestImports.test_no_warning_on_import ``` which I fix by getting rid of the `__init__`, which I think is OK since it'll just error if you try to make one? Pull Request resolved: https://github.com/pytorch/pytorch/pull/165479 Approved by: https://github.com/ezyang
This commit is contained in:
committed by
PyTorch MergeBot
parent
7a97832585
commit
0aa7ebaf03
@ -273,12 +273,7 @@ class TestFakePG(TestCase):
|
||||
kwargs = {}
|
||||
return func(*args, **kwargs)
|
||||
|
||||
with self.assertRaisesRegex(
|
||||
RuntimeError,
|
||||
r"FakeProcessGroup cannot be constructed directly\. "
|
||||
r"Use torch\.distributed\.init_process_group\(backend='fake'\) instead to ensure "
|
||||
r"proper dispatch system integration\.",
|
||||
):
|
||||
with self.assertRaisesRegex(TypeError, r"No constructor defined"):
|
||||
fake_pg = FakeProcessGroup(rank=0, world_size=3)
|
||||
|
||||
with SimpleTensorMode():
|
||||
|
@ -607,7 +607,6 @@ class ProcessGroup:
|
||||
def group_desc(self) -> str: ...
|
||||
|
||||
class FakeProcessGroup(Backend):
|
||||
def __init__(self, rank: int, world_size: int) -> None: ...
|
||||
@staticmethod
|
||||
def _create_internal(rank: int, world_size: int) -> FakeProcessGroup: ...
|
||||
|
||||
|
@ -33,9 +33,8 @@ class FakeProcessGroup : public Backend {
|
||||
int rank,
|
||||
int size,
|
||||
c10::intrusive_ptr<Options> options = c10::make_intrusive<Options>()) {
|
||||
return c10::intrusive_ptr<FakeProcessGroup>(
|
||||
new FakeProcessGroup(rank, size, std::move(options)),
|
||||
c10::raw::DontIncreaseRefcount{});
|
||||
return c10::make_intrusive<FakeProcessGroup>(
|
||||
rank, size, std::move(options));
|
||||
}
|
||||
|
||||
const std::string getBackendName() const override {
|
||||
@ -238,12 +237,12 @@ class FakeProcessGroup : public Backend {
|
||||
return c10::make_intrusive<FakeWork>();
|
||||
}
|
||||
|
||||
private:
|
||||
// Private constructor used by official APIs
|
||||
FakeProcessGroup(int rank, int size, c10::intrusive_ptr<Options> options)
|
||||
: Backend(rank, size), options_(std::move(options)) {}
|
||||
c10::intrusive_ptr<Options> options_;
|
||||
|
||||
private:
|
||||
void checkCollectiveError() {
|
||||
TORCH_CHECK(
|
||||
!options_ || !options_->error_on_collective,
|
||||
|
@ -3838,17 +3838,6 @@ such as `dist.all_reduce(tensor, async_op=True)`.
|
||||
py::arg("world_size"),
|
||||
py::arg("options") =
|
||||
c10::make_intrusive<::c10d::FakeProcessGroup::Options>())
|
||||
.def(
|
||||
"__init__",
|
||||
[](const py::object&,
|
||||
const py::args& args,
|
||||
const py::kwargs& kwargs) {
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"FakeProcessGroup cannot be constructed directly. "
|
||||
"Use torch.distributed.init_process_group(backend='fake') instead to ensure "
|
||||
"proper dispatch system integration.");
|
||||
})
|
||||
.def_property_readonly(
|
||||
"options", &::c10d::FakeProcessGroup::getBackendOptions);
|
||||
auto fakeWork =
|
||||
|
Reference in New Issue
Block a user