Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00.
Output to stderr in distributed tests. (#42139)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/42139 A bunch of tests were failing with buck since we would output to stdout and buck would fail parsing stdout in some cases. Moving these print statements to stderr fixes this issue. ghstack-source-id: 108606579 Test Plan: Run the offending unit tests. Reviewed By: mrshenli Differential Revision: D22779135 fbshipit-source-id: 789af3b16a03b68a6cb12377ed852e5b5091bbad
This commit is contained in:
committed by: Facebook GitHub Bot
parent: fe4f19e164
commit: 872237c1f2
```diff
@@ -40,7 +40,7 @@ from torch.testing._internal.common_utils import TestCase, load_tests, run_tests
 load_tests = load_tests
 
 if not c10d.is_available():
-    print('c10d not available, skipping tests')
+    print('c10d not available, skipping tests', file=sys.stderr)
     sys.exit(0)
 
 
```
```diff
@@ -20,12 +20,12 @@ from torch.testing._internal.common_utils import NO_MULTIPROCESSING_SPAWN, TEST_
 load_tests = load_tests
 
 if not c10d.is_available():
-    print('c10d not available, skipping tests')
+    print('c10d not available, skipping tests', file=sys.stderr)
     sys.exit(0)
 
 
 if NO_MULTIPROCESSING_SPAWN:
-    print('spawn not available, skipping tests')
+    print('spawn not available, skipping tests', file=sys.stderr)
     sys.exit(0)
 
 
```
```diff
@@ -116,7 +116,7 @@ def get_timeout(test_id):
 
 
 if not dist.is_available():
-    print("Distributed not available, skipping tests")
+    print("Distributed not available, skipping tests", file=sys.stderr)
     sys.exit(0)
 
 
```
```diff
@@ -1,5 +1,6 @@
 import unittest
 
+import sys
 import torch
 import torch.cuda.nccl as nccl
 import torch.cuda
```
```diff
@@ -18,7 +19,6 @@ load_tests = load_tests
 
 nGPUs = torch.cuda.device_count()
 if not TEST_CUDA:
-    import sys
     print('CUDA not available, skipping tests', file=sys.stderr)
     TestCase = object  # noqa: F811
 
```
```diff
@@ -37,7 +37,7 @@ TEST_CUDA = torch.cuda.is_available()
 TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
 
 if not TEST_CUDA:
-    print('CUDA not available, skipping tests')
+    print('CUDA not available, skipping tests', file=sys.stderr)
     TestCase = object  # noqa: F811
 
 TEST_MAGMA = TEST_CUDA
```
```diff
@@ -1,5 +1,6 @@
 import torch
 from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm
+import sys
 import unittest
 
 # NOTE: this needs to be run in a brand new process
```
```diff
@@ -12,7 +13,7 @@ TEST_CUDA = torch.cuda.is_available()
 TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
 
 if not TEST_CUDA:
-    print('CUDA not available, skipping tests')
+    print('CUDA not available, skipping tests', file=sys.stderr)
     TestCase = object  # noqa: F811
 
 
```
```diff
@@ -3,6 +3,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 import time
 from functools import partial, wraps
 import re
+import sys
 
 import torch.distributed as dist
 import torch.distributed.rpc as rpc
```
```diff
@@ -10,7 +11,7 @@ from torch.distributed.rpc import _rref_context_get_debug_info
 
 
 if not dist.is_available():
-    print("c10d not available, skipping tests")
+    print("c10d not available, skipping tests", file=sys.stderr)
     sys.exit(0)
 
 
```
Reference in New Issue
Block a user