- [Generic TypeAlias (PEP 585)](https://peps.python.org/pep-0585): e.g. `typing.List[T] -> list[T]`, `typing.Dict[KT, VT] -> dict[KT, VT]`, `typing.Type[T] -> type[T]`.
- [Union Type (PEP 604)](https://peps.python.org/pep-0604): e.g. `Union[X, Y] -> X | Y`, `Optional[X] -> X | None`, `Optional[Union[X, Y]] -> X | Y | None`.

Note that in `.pyi` stub files, we do not need `from __future__ import annotations`, so this PR does not violate issue #117449.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129419
Approved by: https://github.com/ezyang
ghstack dependencies: #129375, #129376
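To make the two rewrites concrete, here is a minimal before/after sketch in stub style (`lookup` is a hypothetical example, not code from this PR):

```python
from typing import Dict, List, Optional, Union

# Before: typing-module generics (pre-PEP 585) and Union/Optional (pre-PEP 604)
def lookup(keys: List[int], table: Dict[int, str]) -> Optional[Union[str, bytes]]: ...

# After: builtin generics (PEP 585) and the | union operator (PEP 604)
def lookup(keys: list[int], table: dict[int, str]) -> str | bytes | None: ...
```

Because stub files are only read by type checkers and never executed, the newer syntax needs no runtime support, which is why `from __future__ import annotations` is unnecessary in `.pyi` files.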
28 lines · 918 B · Python
# mypy: allow-untyped-defs

from typing import Any

import torch

# This module is defined in torch/csrc/distributed/autograd/init.cpp

class DistAutogradContext:
    def _context_id(self) -> int: ...
    def _recv_functions(self) -> dict[int, Any]: ...
    def _send_functions(self) -> dict[int, Any]: ...
    def _known_worker_ids(self) -> set[int]: ...

def _new_context() -> DistAutogradContext: ...
def _release_context(context_id: int) -> None: ...
def _get_max_id() -> int: ...
def _is_valid_context(worker_id: int) -> bool: ...
def _retrieve_context(context_id: int) -> DistAutogradContext: ...
def _current_context() -> DistAutogradContext: ...
def _init(worker_id: int) -> None: ...
def _get_debug_info() -> dict[str, str]: ...
def backward(
    context_id: int,
    roots: list[torch.Tensor],
    retain_graph=False,
) -> None: ...
def get_gradients(context_id: int) -> dict[torch.Tensor, torch.Tensor]: ...
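These private bindings back the public `torch.distributed.autograd` package. Below is a minimal usage sketch, assuming `torch.distributed.rpc.init_rpc` has already been called on each worker; the cross-worker forward pass is elided and a local tensor stands in for an RPC-produced loss:

```python
import torch
import torch.distributed.autograd as dist_autograd

# Entering the context manager calls _new_context() under the hood;
# exiting it calls _release_context(context_id).
with dist_autograd.context() as context_id:
    # In a real job, `loss` would come from a forward pass spanning
    # RPC calls to other workers; a local tensor stands in here.
    loss = torch.ones(2, 2, requires_grad=True).sum()

    # Run the distributed backward pass for this context
    # (the `backward` binding in this stub).
    dist_autograd.backward(context_id, [loss])

    # Gradients are accumulated per-context rather than in .grad;
    # get_gradients returns a Tensor -> gradient mapping.
    grads = dist_autograd.get_gradients(context_id)
```

Keeping gradients in a per-context map rather than in each tensor's `.grad` field is what lets several distributed backward passes run concurrently without clobbering one another.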