mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 05:34:18 +08:00
[BE][CI][Easy] Run lintrunner on generated .pyi stub files (#150732)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/150732 Approved by: https://github.com/malfet, https://github.com/cyyever, https://github.com/aorenste
This commit is contained in:
committed by
PyTorch MergeBot
parent
0a7eef140b
commit
7ae204c3b6
@@ -2383,7 +2383,7 @@ class CudaNonDefaultStream:
                 device_type=deviceStream.device_type)
         torch._C._cuda_setDevice(beforeDevice)

-    def __exit__(self, exec_type, exec_value, traceback):
+    def __exit__(self, exc_type, exc_value, traceback):
         # After completing CUDA test load previously active streams on all
         # CUDA devices.
         beforeDevice = torch.cuda.current_device()
@@ -2431,9 +2431,9 @@ class CudaMemoryLeakCheck:
         driver_mem_allocated = bytes_total - bytes_free
         self.driver_befores.append(driver_mem_allocated)

-    def __exit__(self, exec_type, exec_value, traceback):
+    def __exit__(self, exc_type, exc_value, traceback):
         # Don't check for leaks if an exception was thrown
-        if exec_type is not None:
+        if exc_type is not None:
             return

         # Compares caching allocator before/after statistics
|
Reference in New Issue
Block a user