mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Changes: - #95200 1. Recognize `.py.in` and `.pyi.in` files as Python in VS Code for a better development experience. 2. Fix deep setting merge in `tools/vscode_settings.py`. - #95267 3. Use `Namedtuple` rather than `namedtuple + __annotations__` for `torch.nn.utils.rnn.PackedSequence_`: `namedtuple + __annotations__`: ```python PackedSequence_ = namedtuple('PackedSequence_', ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices']) # type annotation for PackedSequence_ to make it compatible with TorchScript PackedSequence_.__annotations__ = {'data': torch.Tensor, 'batch_sizes': torch.Tensor, 'sorted_indices': Optional[torch.Tensor], 'unsorted_indices': Optional[torch.Tensor]} ``` `Namedtuple`: Python 3.6+ ```python class PackedSequence_(NamedTuple): data: torch.Tensor batch_sizes: torch.Tensor sorted_indices: Optional[torch.Tensor] unsorted_indices: Optional[torch.Tensor] ``` - => this PR: #95268 4. Sort import statements and remove unnecessary imports in `.pyi`, `.pyi.in` files. 5. Format `.pyi`, `.pyi.in` files and remove unnecessary ellipsis `...` in type stubs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/95268 Approved by: https://github.com/huydhn
27 lines
908 B
Python
27 lines
908 B
Python
from typing import Any, Dict, List, Set
|
|
|
|
import torch
|
|
|
|
# This module is defined in torch/csrc/distributed/autograd/init.cpp
|
|
|
|
class DistAutogradContext:
    """Type stub for the per-pass distributed autograd context.

    The concrete class is implemented in C++ (see the module comment above:
    torch/csrc/distributed/autograd/init.cpp); this stub only declares the
    signatures visible to Python type checkers.
    """

    # Unique integer id identifying this autograd context.
    def _context_id(self) -> int: ...
    # Mapping of int keys to recv-function objects; the value type is opaque
    # at the stub level (presumably RecvRpcBackward nodes — confirm in init.cpp).
    def _recv_functions(self) -> Dict[int, Any]: ...
    # Mapping of int keys to send-function objects; value type opaque here
    # (presumably SendRpcBackward nodes — confirm in init.cpp).
    def _send_functions(self) -> Dict[int, Any]: ...
    # Set of worker ids this context has communicated with.
    def _known_worker_ids(self) -> Set[int]: ...
|
|
|
|
# Create and return a fresh distributed autograd context.
def _new_context() -> DistAutogradContext: ...
|
|
# Release the autograd context identified by `context_id`.
def _release_context(context_id: int) -> None: ...
|
|
# Return the highest context id known to this process (semantics defined
# by the C++ implementation — confirm in init.cpp).
def _get_max_id() -> int: ...
|
|
# True if the given id refers to a live autograd context.
# NOTE(review): the parameter is named `worker_id` while the sibling
# functions take `context_id`; it appears to be a context id — confirm
# against the C++ binding before relying on the name.
def _is_valid_context(worker_id: int) -> bool: ...
|
|
# Look up and return the autograd context with the given id.
def _retrieve_context(context_id: int) -> DistAutogradContext: ...
|
|
# Return the autograd context currently active on this thread/worker.
def _current_context() -> DistAutogradContext: ...
|
|
# Initialize the distributed autograd module for the given worker id.
def _init(worker_id: int) -> None: ...
|
|
# Return a string-to-string map of debug/diagnostic information.
def _get_debug_info() -> Dict[str, str]: ...
|
|
# Run the distributed backward pass for the given autograd context.
#
# Args:
#     context_id: id of the autograd context to run backward in.
#     roots: tensors from which the backward pass starts.
#     retain_graph: whether to keep the graph for further backward calls
#         (annotation added; the default False is unchanged).
def backward(
    context_id: int,
    roots: List[torch.Tensor],
    retain_graph: bool = False,
) -> None: ...
|
|
# Return the accumulated gradients for the given context as a map from
# parameter tensor to its gradient tensor.
def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ...
|