Mirror of https://github.com/pytorch/pytorch.git, synced 2025-11-01 04:54:55 +08:00

Compare commits: gh/karthic... and revert-cpp (180 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 2eacbe792a | |||
| 8110ce02a2 | |||
| 43c30f607e | |||
| 5ebf74a655 | |||
| acd936cc1a | |||
| a4a0378e6b | |||
| ac841267a1 | |||
| 0eacd934bc | |||
| 5016e7b2eb | |||
| 544b443ea1 | |||
| 3041ede082 | |||
| 34d6ef7022 | |||
| 110efe4df4 | |||
| e137cd0a10 | |||
| be28329710 | |||
| 85a7c745aa | |||
| 32fe4f681e | |||
| ebb2b2e894 | |||
| 13413b3b07 | |||
| 5d0b3e28dc | |||
| 9139368b64 | |||
| 02095cc09d | |||
| 65868156c6 | |||
| f93ea7dab1 | |||
| a77f5d9a00 | |||
| ff46d5a79b | |||
| f452edd782 | |||
| ea698e8bfc | |||
| 7f7a28046b | |||
| d8283a317a | |||
| e0ca3049c0 | |||
| 8417981c96 | |||
| 06e71c8558 | |||
| a76b59cc45 | |||
| 74336f8c77 | |||
| 236ce736a1 | |||
| 17bdb232e1 | |||
| add37bacda | |||
| 1425b40f29 | |||
| 8af9ed0824 | |||
| 7045aab143 | |||
| 7ae8aaf4c0 | |||
| f2450798cd | |||
| 46d17e8871 | |||
| dc011d3203 | |||
| e95920e3e6 | |||
| 5e769ff867 | |||
| 0ae3e30621 | |||
| 47f50cfd45 | |||
| a51f877287 | |||
| b44423bbb4 | |||
| 8e1e4ee8e0 | |||
| 1e836bc769 | |||
| 9a91486e45 | |||
| 92381a5aa7 | |||
| 2a5f87decf | |||
| 840d63c12d | |||
| 2ce894bb1d | |||
| 47ec1e9990 | |||
| 904abfc2ca | |||
| 7d16fcf2df | |||
| 483845a9c4 | |||
| 60bcb4ee88 | |||
| ee7434be82 | |||
| d049ed2cb1 | |||
| 9901d44418 | |||
| 6096c0fc74 | |||
| f6951cb8ea | |||
| 8887a33ede | |||
| 36a48e7e6d | |||
| c6a02eae5b | |||
| 6ecd6b23b6 | |||
| 3f69b4d9b4 | |||
| a04edcb27a | |||
| eb2bad5bb5 | |||
| a076b4d7ac | |||
| a988510c33 | |||
| 99e07c39ec | |||
| 610c09f8f4 | |||
| 61bad3c1ea | |||
| f89a7e9fe8 | |||
| f2c81635c8 | |||
| e214af6ae8 | |||
| 7ce723d21c | |||
| 4295a9a158 | |||
| 90d7be35e9 | |||
| 8d4e48831e | |||
| 90b30ebf7e | |||
| 173bcda436 | |||
| 6530bc70fb | |||
| 4c38887346 | |||
| 81fa4a204c | |||
| 4e6afa8c07 | |||
| 79aa88cc5d | |||
| fa4cb91846 | |||
| c58d0ad85d | |||
| 000f49551b | |||
| 9940e894ea | |||
| 27302a4932 | |||
| 507614ba43 | |||
| 86f9f1d0ab | |||
| 154e4d36e9 | |||
| a2b6afeac5 | |||
| 262830d86c | |||
| e4c01011c2 | |||
| a60d9e1f6d | |||
| f863550192 | |||
| 84b14f3a10 | |||
| 5121499f6b | |||
| 8f80892359 | |||
| cdb60e44eb | |||
| 25909d2629 | |||
| c7eee49525 | |||
| 621ba05107 | |||
| 39a70cead1 | |||
| d97f6550a2 | |||
| 516e58965a | |||
| b55b779ad3 | |||
| 74e53d0761 | |||
| 798a6d2be1 | |||
| b0e9c86971 | |||
| 661a56002f | |||
| c9bc00f016 | |||
| ec51b139e1 | |||
| eb83c3ca23 | |||
| 7924e3aacf | |||
| 78bcfcf870 | |||
| 1e2e7cb18b | |||
| 003601a70d | |||
| 1d58d5fe25 | |||
| de7fdfe41a | |||
| b31bad1b8f | |||
| 2efcf3ca98 | |||
| 761f946043 | |||
| 8aa465f18e | |||
| 0a5d68d92d | |||
| 42bd210fff | |||
| 1d13c314b3 | |||
| 0c9763a5a0 | |||
| 79a4a9c02e | |||
| 9d0b77f4cd | |||
| d486eee234 | |||
| cddd5f74ab | |||
| dfdb68e51f | |||
| 98c818320a | |||
| cc20b7ad72 | |||
| bc11a42b3f | |||
| 4fc06f2e0a | |||
| 82473c3d59 | |||
| b6a4236e5d | |||
| b04173be9b | |||
| 32ac38f85d | |||
| c9b49e506e | |||
| 6038e476e8 | |||
| 2c851c16e5 | |||
| 31584f2d91 | |||
| 0442125362 | |||
| fdcf402d82 | |||
| 13cda9b89e | |||
| fa6d911dda | |||
| 0db6bcc015 | |||
| 60ac039998 | |||
| 380d440d1c | |||
| 9038a30cee | |||
| 690c8c13b9 | |||
| 28ee6b62ed | |||
| 81577bdb3f | |||
| e67e3d95f3 | |||
| 27af8480ea | |||
| 6494cdc40c | |||
| ac7074efa2 | |||
| 263901cec4 | |||
| c12293dcbe | |||
| 5a4997dcae | |||
| 47f638eae7 | |||
| 882b834082 | |||
| b146ea411e | |||
| 8625ffbd45 | |||
| 0977cc4474 | |||
| d9a55faccc | 
| @ -150,7 +150,7 @@ function install_130 { | ||||
|   CUDNN_VERSION=9.13.0.50 | ||||
|   echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1" | ||||
|   # install CUDA 13.0 in the same container | ||||
|   install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux | ||||
|   install_cuda 13.0.2 cuda_13.0.2_580.95.05_linux | ||||
|  | ||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|   install_cudnn 13 $CUDNN_VERSION | ||||
|  | ||||
							
								
								
									

.claude/skills/docstring/SKILL.md (new file, 359 lines)

							| @ -0,0 +1,359 @@ | ||||
| --- | ||||
| name: docstring | ||||
| description: Write docstrings for PyTorch functions and methods following PyTorch conventions. Use when writing or updating docstrings in PyTorch code. | ||||
| --- | ||||
|  | ||||
| # PyTorch Docstring Writing Guide | ||||
|  | ||||
| This skill describes how to write docstrings for functions and methods in the PyTorch project, following the conventions in `torch/_tensor_docs.py` and `torch/nn/functional.py`. | ||||
|  | ||||
| ## General Principles | ||||
|  | ||||
| - Use **raw strings** (`r"""..."""`) for all docstrings to avoid issues with LaTeX/math backslashes | ||||
| - Follow **Sphinx/reStructuredText** (reST) format for documentation | ||||
| - Be **concise but complete** - include all essential information | ||||
| - Always include **examples** when possible | ||||
| - Use **cross-references** to related functions/classes | ||||
|  | ||||
| ## Docstring Structure | ||||
|  | ||||
| ### 1. Function Signature (First Line) | ||||
|  | ||||
| Start with the function signature showing all parameters: | ||||
|  | ||||
| ```python | ||||
| r"""function_name(param1, param2, *, kwarg1=default1, kwarg2=default2) -> ReturnType | ||||
| ``` | ||||
|  | ||||
| **Notes:** | ||||
| - Include the function name | ||||
| - Show positional and keyword-only arguments (use `*` separator) | ||||
| - Include default values | ||||
| - Show return type annotation | ||||
| - This line should NOT end with a period | ||||
|  | ||||
| ### 2. Brief Description | ||||
|  | ||||
| Provide a one-line description of what the function does: | ||||
|  | ||||
| ```python | ||||
| r"""conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor | ||||
|  | ||||
| Applies a 2D convolution over an input image composed of several input | ||||
| planes. | ||||
| ``` | ||||
|  | ||||
| ### 3. Mathematical Formulas (if applicable) | ||||
|  | ||||
| Use Sphinx math directives for mathematical expressions: | ||||
|  | ||||
| ```python | ||||
| .. math:: | ||||
|     \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)} | ||||
| ``` | ||||
|  | ||||
| Or inline math: `:math:\`x^2\`` | ||||
|  | ||||
| ### 4. Cross-References | ||||
|  | ||||
| Link to related classes and functions using Sphinx roles: | ||||
|  | ||||
| - `:class:\`~torch.nn.ModuleName\`` - Link to a class | ||||
| - `:func:\`torch.function_name\`` - Link to a function | ||||
| - `:meth:\`~Tensor.method_name\`` - Link to a method | ||||
| - `:attr:\`attribute_name\`` - Reference an attribute | ||||
| - The `~` prefix shows only the last component (e.g., `Conv2d` instead of `torch.nn.Conv2d`) | ||||
|  | ||||
| **Example:** | ||||
| ```python | ||||
| See :class:`~torch.nn.Conv2d` for details and output shape. | ||||
| ``` | ||||
|  | ||||
| ### 5. Notes and Warnings | ||||
|  | ||||
| Use admonitions for important information: | ||||
|  | ||||
| ```python | ||||
| .. note:: | ||||
|     This function doesn't work directly with NLLLoss, | ||||
|     which expects the Log to be computed between the Softmax and itself. | ||||
|     Use log_softmax instead (it's faster and has better numerical properties). | ||||
|  | ||||
| .. warning:: | ||||
|     :func:`new_tensor` always copies :attr:`data`. If you have a Tensor | ||||
|     ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_` | ||||
|     or :func:`torch.Tensor.detach`. | ||||
| ``` | ||||
|  | ||||
| ### 6. Args Section | ||||
|  | ||||
| Document all parameters with type annotations and descriptions: | ||||
|  | ||||
| ```python | ||||
| Args: | ||||
|     input (Tensor): input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` | ||||
|     weight (Tensor): filters of shape :math:`(\text{out\_channels} , kH , kW)` | ||||
|     bias (Tensor, optional): optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None`` | ||||
|     stride (int or tuple): the stride of the convolving kernel. Can be a single number or a | ||||
|       tuple `(sH, sW)`. Default: 1 | ||||
| ``` | ||||
|  | ||||
| **Formatting rules:** | ||||
| - Parameter name in **lowercase** | ||||
| - Type in parentheses: `(Type)`, `(Type, optional)` for optional parameters | ||||
| - Description follows the type | ||||
| - For optional parameters, include "Default: ``value``" at the end | ||||
| - Use double backticks for inline code: ``` ``None`` ``` | ||||
| - Indent continuation lines by 2 spaces | ||||
|  | ||||
| ### 7. Keyword Args Section (if applicable) | ||||
|  | ||||
| Sometimes keyword arguments are documented separately: | ||||
|  | ||||
| ```python | ||||
| Keyword args: | ||||
|     dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. | ||||
|         Default: if None, same :class:`torch.dtype` as this tensor. | ||||
|     device (:class:`torch.device`, optional): the desired device of returned tensor. | ||||
|         Default: if None, same :class:`torch.device` as this tensor. | ||||
|     requires_grad (bool, optional): If autograd should record operations on the | ||||
|         returned tensor. Default: ``False``. | ||||
| ``` | ||||
|  | ||||
| ### 8. Returns Section (if needed) | ||||
|  | ||||
| Document the return value: | ||||
|  | ||||
| ```python | ||||
| Returns: | ||||
|     Tensor: Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution. | ||||
|         If ``hard=True``, the returned samples will be one-hot, otherwise they will | ||||
|         be probability distributions that sum to 1 across `dim`. | ||||
| ``` | ||||
|  | ||||
| Or simply include it in the function signature line if obvious from context. | ||||
|  | ||||
| ### 9. Examples Section | ||||
|  | ||||
| Always include examples when possible: | ||||
|  | ||||
| ```python | ||||
| Examples:: | ||||
|  | ||||
|     >>> inputs = torch.randn(33, 16, 30) | ||||
|     >>> filters = torch.randn(20, 16, 5) | ||||
|     >>> F.conv1d(inputs, filters) | ||||
|  | ||||
|     >>> # With square kernels and equal stride | ||||
|     >>> filters = torch.randn(8, 4, 3, 3) | ||||
|     >>> inputs = torch.randn(1, 4, 5, 5) | ||||
|     >>> F.conv2d(inputs, filters, padding=1) | ||||
| ``` | ||||
|  | ||||
| **Formatting rules:** | ||||
| - Use `Examples::` with double colon | ||||
| - Use `>>>` prompt for Python code | ||||
| - Include comments with `#` when helpful | ||||
| - Show actual output when it helps understanding (indent without `>>>`) | ||||
|  | ||||
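
As a small illustration of the last rule above, expected output can be shown indented under the call, without a `>>>` prompt (a hypothetical fragment, not taken from the PyTorch sources):

```python
Examples::

    >>> a = torch.tensor([1., 2., 3.])
    >>> torch.cumsum(a, dim=0)
    tensor([1., 3., 6.])
```
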
| ### 10. External References | ||||
|  | ||||
| Link to papers or external documentation: | ||||
|  | ||||
| ```python | ||||
| .. _Link Name: | ||||
|     https://arxiv.org/abs/1611.00712 | ||||
| ``` | ||||
|  | ||||
| Reference them in text: ```See `Link Name`_``` | ||||
|  | ||||
| ## Method Types | ||||
|  | ||||
| ### Native Python Functions | ||||
|  | ||||
| For regular Python functions, use a standard docstring: | ||||
|  | ||||
| ```python | ||||
| def relu(input: Tensor, inplace: bool = False) -> Tensor: | ||||
|     r"""relu(input, inplace=False) -> Tensor | ||||
|  | ||||
|     Applies the rectified linear unit function element-wise. See | ||||
|     :class:`~torch.nn.ReLU` for more details. | ||||
|     """ | ||||
|     # implementation | ||||
| ``` | ||||
|  | ||||
| ### C-Bound Functions (using add_docstr) | ||||
|  | ||||
| For C-bound functions, use `_add_docstr`: | ||||
|  | ||||
| ```python | ||||
| conv1d = _add_docstr( | ||||
|     torch.conv1d, | ||||
|     r""" | ||||
| conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor | ||||
|  | ||||
| Applies a 1D convolution over an input signal composed of several input | ||||
| planes. | ||||
|  | ||||
| See :class:`~torch.nn.Conv1d` for details and output shape. | ||||
|  | ||||
| Args: | ||||
|     input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` | ||||
|     weight: filters of shape :math:`(\text{out\_channels} , kW)` | ||||
|     ... | ||||
| """, | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| ### In-Place Variants | ||||
|  | ||||
| For in-place operations (ending with `_`), reference the original: | ||||
|  | ||||
| ```python | ||||
| add_docstr_all( | ||||
|     "abs_", | ||||
|     r""" | ||||
| abs_() -> Tensor | ||||
|  | ||||
| In-place version of :meth:`~Tensor.abs` | ||||
| """, | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| ### Alias Functions | ||||
|  | ||||
| For aliases, simply reference the original: | ||||
|  | ||||
| ```python | ||||
| add_docstr_all( | ||||
|     "absolute", | ||||
|     r""" | ||||
| absolute() -> Tensor | ||||
|  | ||||
| Alias for :func:`abs` | ||||
| """, | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| ## Common Patterns | ||||
|  | ||||
| ### Shape Documentation | ||||
|  | ||||
| Use LaTeX math notation for tensor shapes: | ||||
|  | ||||
| ```python | ||||
| :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` | ||||
| ``` | ||||
|  | ||||
| ### Reusable Argument Definitions | ||||
|  | ||||
| For commonly used arguments, define them once and reuse: | ||||
|  | ||||
| ```python | ||||
| common_args = parse_kwargs( | ||||
|     """ | ||||
|     dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. | ||||
|         Default: if None, same as this tensor. | ||||
| """ | ||||
| ) | ||||
|  | ||||
| # Then use with .format(): | ||||
| r""" | ||||
| ... | ||||
|  | ||||
| Keyword args: | ||||
|     {dtype} | ||||
|     {device} | ||||
| """.format(**common_args) | ||||
| ``` | ||||
|  | ||||
| ### Template Insertion | ||||
|  | ||||
| Insert reproducibility notes or other common text: | ||||
|  | ||||
| ```python | ||||
| r""" | ||||
| {tf32_note} | ||||
|  | ||||
| {cudnn_reproducibility_note} | ||||
| """.format(**reproducibility_notes, **tf32_notes) | ||||
| ``` | ||||
|  | ||||
| ## Complete Example | ||||
|  | ||||
| Here's a complete example showing all elements: | ||||
|  | ||||
| ```python | ||||
| def gumbel_softmax( | ||||
|     logits: Tensor, | ||||
|     tau: float = 1, | ||||
|     hard: bool = False, | ||||
|     eps: float = 1e-10, | ||||
|     dim: int = -1, | ||||
| ) -> Tensor: | ||||
|     r""" | ||||
|     Sample from the Gumbel-Softmax distribution and optionally discretize. | ||||
|  | ||||
|     Args: | ||||
|         logits (Tensor): `[..., num_features]` unnormalized log probabilities | ||||
|         tau (float): non-negative scalar temperature | ||||
|         hard (bool): if ``True``, the returned samples will be discretized as one-hot vectors, | ||||
|               but will be differentiated as if it is the soft sample in autograd. Default: ``False`` | ||||
|         dim (int): A dimension along which softmax will be computed. Default: -1 | ||||
|  | ||||
|     Returns: | ||||
|         Tensor: Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution. | ||||
|             If ``hard=True``, the returned samples will be one-hot, otherwise they will | ||||
|             be probability distributions that sum to 1 across `dim`. | ||||
|  | ||||
|     .. note:: | ||||
|         This function is here for legacy reasons, may be removed from nn.Functional in the future. | ||||
|  | ||||
|     Examples:: | ||||
|         >>> logits = torch.randn(20, 32) | ||||
|         >>> # Sample soft categorical using reparametrization trick: | ||||
|         >>> F.gumbel_softmax(logits, tau=1, hard=False) | ||||
|         >>> # Sample hard categorical using "Straight-through" trick: | ||||
|         >>> F.gumbel_softmax(logits, tau=1, hard=True) | ||||
|  | ||||
|     .. _Link 1: | ||||
|         https://arxiv.org/abs/1611.00712 | ||||
|     """ | ||||
|     # implementation | ||||
| ``` | ||||
|  | ||||
| ## Quick Checklist | ||||
|  | ||||
| When writing a PyTorch docstring, ensure: | ||||
|  | ||||
| - [ ] Use raw string (`r"""`) | ||||
| - [ ] Include function signature on first line | ||||
| - [ ] Provide brief description | ||||
| - [ ] Document all parameters in Args section with types | ||||
| - [ ] Include default values for optional parameters | ||||
| - [ ] Use Sphinx cross-references (`:func:`, `:class:`, `:meth:`) | ||||
| - [ ] Add mathematical formulas if applicable | ||||
| - [ ] Include at least one example in Examples section | ||||
| - [ ] Add warnings/notes for important caveats | ||||
| - [ ] Link to related module class with `:class:` | ||||
| - [ ] Use proper math notation for tensor shapes | ||||
| - [ ] Follow consistent formatting and indentation | ||||
|  | ||||
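
A quick way to sanity-check several of the checklist items above is to confirm that the docstring is actually attached and that it begins with the signature line; a minimal snippet, assuming a local PyTorch install is importable:

```python
import torch.nn.functional as F

# The docstring added via _add_docstr (or a plain def) should be present.
assert F.conv1d.__doc__ is not None

# Print the first non-empty line; per the checklist it should be the signature,
# e.g. "conv1d(input, weight, bias=None, ...) -> Tensor", with no trailing period.
first_line = next(line for line in F.conv1d.__doc__.splitlines() if line.strip())
print(first_line)
```
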
| ## Common Sphinx Roles Reference | ||||
|  | ||||
| - `:class:\`~torch.nn.Module\`` - Class reference | ||||
| - `:func:\`torch.function\`` - Function reference | ||||
| - `:meth:\`~Tensor.method\`` - Method reference | ||||
| - `:attr:\`attribute\`` - Attribute reference | ||||
| - `:math:\`equation\`` - Inline math | ||||
| - `:ref:\`label\`` - Internal reference | ||||
| - ``` ``code`` ``` - Inline code (use double backticks) | ||||
|  | ||||
| ## Additional Notes | ||||
|  | ||||
| - **Indentation**: Use 4 spaces for code, 2 spaces for continuation of parameter descriptions | ||||
| - **Line length**: Try to keep lines under 100 characters when possible | ||||
| - **Periods**: End sentences with periods, but not the signature line | ||||
| - **Backticks**: Use double backticks for code: ``` ``True`` ``None`` ``False`` ``` | ||||
| - **Types**: Common types are `Tensor`, `int`, `float`, `bool`, `str`, `tuple`, `list`, etc. | ||||
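
Putting these notes together, a single `Args:` entry written to the above conventions might look like this (an illustrative parameter, not copied from any particular function):

```python
Args:
    keepdim (bool, optional): whether the output tensor has :attr:`dim` retained
      or not. Default: ``False``
```
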
							
								
								
									

.claude/skills/skill-writer/SKILL.md (new file, 385 lines)

							| @ -0,0 +1,385 @@ | ||||
| --- | ||||
| name: skill-writer | ||||
| description: Guide users through creating Agent Skills for Claude Code. Use when the user wants to create, write, author, or design a new Skill, or needs help with SKILL.md files, frontmatter, or skill structure. | ||||
| --- | ||||
|  | ||||
| # Skill Writer | ||||
|  | ||||
| This Skill helps you create well-structured Agent Skills for Claude Code that follow best practices and validation requirements. | ||||
|  | ||||
| ## When to use this Skill | ||||
|  | ||||
| Use this Skill when: | ||||
| - Creating a new Agent Skill | ||||
| - Writing or updating SKILL.md files | ||||
| - Designing skill structure and frontmatter | ||||
| - Troubleshooting skill discovery issues | ||||
| - Converting existing prompts or workflows into Skills | ||||
|  | ||||
| ## Instructions | ||||
|  | ||||
| ### Step 1: Determine Skill scope | ||||
|  | ||||
| First, understand what the Skill should do: | ||||
|  | ||||
| 1. **Ask clarifying questions**: | ||||
|    - What specific capability should this Skill provide? | ||||
|    - When should Claude use this Skill? | ||||
|    - What tools or resources does it need? | ||||
|    - Is this for personal use or team sharing? | ||||
|  | ||||
| 2. **Keep it focused**: One Skill = one capability | ||||
|    - Good: "PDF form filling", "Excel data analysis" | ||||
|    - Too broad: "Document processing", "Data tools" | ||||
|  | ||||
| ### Step 2: Choose Skill location | ||||
|  | ||||
| Determine where to create the Skill: | ||||
|  | ||||
| **Personal Skills** (`~/.claude/skills/`): | ||||
| - Individual workflows and preferences | ||||
| - Experimental Skills | ||||
| - Personal productivity tools | ||||
|  | ||||
| **Project Skills** (`.claude/skills/`): | ||||
| - Team workflows and conventions | ||||
| - Project-specific expertise | ||||
| - Shared utilities (committed to git) | ||||
|  | ||||
| ### Step 3: Create Skill structure | ||||
|  | ||||
| Create the directory and files: | ||||
|  | ||||
| ```bash | ||||
| # Personal | ||||
| mkdir -p ~/.claude/skills/skill-name | ||||
|  | ||||
| # Project | ||||
| mkdir -p .claude/skills/skill-name | ||||
| ``` | ||||
|  | ||||
| For multi-file Skills: | ||||
| ``` | ||||
| skill-name/ | ||||
| ├── SKILL.md (required) | ||||
| ├── reference.md (optional) | ||||
| ├── examples.md (optional) | ||||
| ├── scripts/ | ||||
| │   └── helper.py (optional) | ||||
| └── templates/ | ||||
|     └── template.txt (optional) | ||||
| ``` | ||||
|  | ||||
| ### Step 4: Write SKILL.md frontmatter | ||||
|  | ||||
| Create YAML frontmatter with required fields: | ||||
|  | ||||
| ```yaml | ||||
| --- | ||||
| name: skill-name | ||||
| description: Brief description of what this does and when to use it | ||||
| --- | ||||
| ``` | ||||
|  | ||||
| **Field requirements**: | ||||
|  | ||||
| - **name**: | ||||
|   - Lowercase letters, numbers, hyphens only | ||||
|   - Max 64 characters | ||||
|   - Must match directory name | ||||
|   - Good: `pdf-processor`, `git-commit-helper` | ||||
|   - Bad: `PDF_Processor`, `Git Commits!` | ||||
|  | ||||
| - **description**: | ||||
|   - Max 1024 characters | ||||
|   - Include BOTH what it does AND when to use it | ||||
|   - Use specific trigger words users would say | ||||
|   - Mention file types, operations, and context | ||||
|  | ||||
| **Optional frontmatter fields**: | ||||
|  | ||||
| - **allowed-tools**: Restrict tool access (comma-separated list) | ||||
|   ```yaml | ||||
|   allowed-tools: Read, Grep, Glob | ||||
|   ``` | ||||
|   Use for: | ||||
|   - Read-only Skills | ||||
|   - Security-sensitive workflows | ||||
|   - Limited-scope operations | ||||
|  | ||||
| ### Step 5: Write effective descriptions | ||||
|  | ||||
| The description is critical for Claude to discover your Skill. | ||||
|  | ||||
| **Formula**: `[What it does] + [When to use it] + [Key triggers]` | ||||
|  | ||||
| **Examples**: | ||||
|  | ||||
| ✅ **Good**: | ||||
| ```yaml | ||||
| description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction. | ||||
| ``` | ||||
|  | ||||
| ✅ **Good**: | ||||
| ```yaml | ||||
| description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or analyzing tabular data in .xlsx format. | ||||
| ``` | ||||
|  | ||||
| ❌ **Too vague**: | ||||
| ```yaml | ||||
| description: Helps with documents | ||||
| description: For data analysis | ||||
| ``` | ||||
|  | ||||
| **Tips**: | ||||
| - Include specific file extensions (.pdf, .xlsx, .json) | ||||
| - Mention common user phrases ("analyze", "extract", "generate") | ||||
| - List concrete operations (not generic verbs) | ||||
| - Add context clues ("Use when...", "For...") | ||||
|  | ||||
| ### Step 6: Structure the Skill content | ||||
|  | ||||
| Use clear Markdown sections: | ||||
|  | ||||
| ```markdown | ||||
| # Skill Name | ||||
|  | ||||
| Brief overview of what this Skill does. | ||||
|  | ||||
| ## Quick start | ||||
|  | ||||
| Provide a simple example to get started immediately. | ||||
|  | ||||
| ## Instructions | ||||
|  | ||||
| Step-by-step guidance for Claude: | ||||
| 1. First step with clear action | ||||
| 2. Second step with expected outcome | ||||
| 3. Handle edge cases | ||||
|  | ||||
| ## Examples | ||||
|  | ||||
| Show concrete usage examples with code or commands. | ||||
|  | ||||
| ## Best practices | ||||
|  | ||||
| - Key conventions to follow | ||||
| - Common pitfalls to avoid | ||||
| - When to use vs. not use | ||||
|  | ||||
| ## Requirements | ||||
|  | ||||
| List any dependencies or prerequisites: | ||||
| ```bash | ||||
| pip install package-name | ||||
| ``` | ||||
|  | ||||
| ## Advanced usage | ||||
|  | ||||
| For complex scenarios, see [reference.md](reference.md). | ||||
| ``` | ||||
|  | ||||
| ### Step 7: Add supporting files (optional) | ||||
|  | ||||
| Create additional files for progressive disclosure: | ||||
|  | ||||
| **reference.md**: Detailed API docs, advanced options | ||||
| **examples.md**: Extended examples and use cases | ||||
| **scripts/**: Helper scripts and utilities | ||||
| **templates/**: File templates or boilerplate | ||||
|  | ||||
| Reference them from SKILL.md: | ||||
| ```markdown | ||||
| For advanced usage, see [reference.md](reference.md). | ||||
|  | ||||
| Run the helper script: | ||||
| \`\`\`bash | ||||
| python scripts/helper.py input.txt | ||||
| \`\`\` | ||||
| ``` | ||||
|  | ||||
| ### Step 8: Validate the Skill | ||||
|  | ||||
| Check these requirements: | ||||
|  | ||||
| ✅ **File structure**: | ||||
| - [ ] SKILL.md exists in correct location | ||||
| - [ ] Directory name matches frontmatter `name` | ||||
|  | ||||
| ✅ **YAML frontmatter**: | ||||
| - [ ] Opening `---` on line 1 | ||||
| - [ ] Closing `---` before content | ||||
| - [ ] Valid YAML (no tabs, correct indentation) | ||||
| - [ ] `name` follows naming rules | ||||
| - [ ] `description` is specific and < 1024 chars | ||||
|  | ||||
| ✅ **Content quality**: | ||||
| - [ ] Clear instructions for Claude | ||||
| - [ ] Concrete examples provided | ||||
| - [ ] Edge cases handled | ||||
| - [ ] Dependencies listed (if any) | ||||
|  | ||||
| ✅ **Testing**: | ||||
| - [ ] Description matches user questions | ||||
| - [ ] Skill activates on relevant queries | ||||
| - [ ] Instructions are clear and actionable | ||||
|  | ||||
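
Several of the frontmatter checks above can be automated with a short script; a rough sketch, assuming PyYAML is installed and the path to SKILL.md is passed as the first argument:

```python
import re
import sys

import yaml  # pip install pyyaml

path = sys.argv[1]  # e.g. .claude/skills/my-skill/SKILL.md (hypothetical path)
text = open(path, encoding="utf-8").read()

# Frontmatter is the YAML between the first two '---' lines.
match = re.match(r"^---\n(.*?)\n---\n", text, re.DOTALL)
assert match, "SKILL.md must start with a '---' frontmatter block"

meta = yaml.safe_load(match.group(1)) or {}
name = meta.get("name", "")
description = meta.get("description", "")

assert re.fullmatch(r"[a-z0-9-]+", name) and len(name) <= 64, \
    "name: lowercase letters, numbers, hyphens only; max 64 characters"
assert 0 < len(description) <= 1024, "description: required, max 1024 characters"
print(f"{path}: frontmatter looks valid (name={name!r})")
```
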
| ### Step 9: Test the Skill | ||||
|  | ||||
| 1. **Restart Claude Code** (if running) to load the Skill | ||||
|  | ||||
| 2. **Ask relevant questions** that match the description: | ||||
|    ``` | ||||
|    Can you help me extract text from this PDF? | ||||
|    ``` | ||||
|  | ||||
| 3. **Verify activation**: Claude should use the Skill automatically | ||||
|  | ||||
| 4. **Check behavior**: Confirm Claude follows the instructions correctly | ||||
|  | ||||
| ### Step 10: Debug if needed | ||||
|  | ||||
| If Claude doesn't use the Skill: | ||||
|  | ||||
| 1. **Make description more specific**: | ||||
|    - Add trigger words | ||||
|    - Include file types | ||||
|    - Mention common user phrases | ||||
|  | ||||
| 2. **Check file location**: | ||||
|    ```bash | ||||
|    ls ~/.claude/skills/skill-name/SKILL.md | ||||
|    ls .claude/skills/skill-name/SKILL.md | ||||
|    ``` | ||||
|  | ||||
| 3. **Validate YAML**: | ||||
|    ```bash | ||||
|    cat SKILL.md | head -n 10 | ||||
|    ``` | ||||
|  | ||||
| 4. **Run debug mode**: | ||||
|    ```bash | ||||
|    claude --debug | ||||
|    ``` | ||||
|  | ||||
| ## Common patterns | ||||
|  | ||||
| ### Read-only Skill | ||||
|  | ||||
| ```yaml | ||||
| --- | ||||
| name: code-reader | ||||
| description: Read and analyze code without making changes. Use for code review, understanding codebases, or documentation. | ||||
| allowed-tools: Read, Grep, Glob | ||||
| --- | ||||
| ``` | ||||
|  | ||||
| ### Script-based Skill | ||||
|  | ||||
| ```yaml | ||||
| --- | ||||
| name: data-processor | ||||
| description: Process CSV and JSON data files with Python scripts. Use when analyzing data files or transforming datasets. | ||||
| --- | ||||
|  | ||||
| # Data Processor | ||||
|  | ||||
| ## Instructions | ||||
|  | ||||
| 1. Use the processing script: | ||||
| \`\`\`bash | ||||
| python scripts/process.py input.csv --output results.json | ||||
| \`\`\` | ||||
|  | ||||
| 2. Validate output with: | ||||
| \`\`\`bash | ||||
| python scripts/validate.py results.json | ||||
| \`\`\` | ||||
| ``` | ||||
|  | ||||
| ### Multi-file Skill with progressive disclosure | ||||
|  | ||||
| ```yaml | ||||
| --- | ||||
| name: api-designer | ||||
| description: Design REST APIs following best practices. Use when creating API endpoints, designing routes, or planning API architecture. | ||||
| --- | ||||
|  | ||||
| # API Designer | ||||
|  | ||||
| Quick start: See [examples.md](examples.md) | ||||
|  | ||||
| Detailed reference: See [reference.md](reference.md) | ||||
|  | ||||
| ## Instructions | ||||
|  | ||||
| 1. Gather requirements | ||||
| 2. Design endpoints (see examples.md) | ||||
| 3. Document with OpenAPI spec | ||||
| 4. Review against best practices (see reference.md) | ||||
| ``` | ||||
|  | ||||
| ## Best practices for Skill authors | ||||
|  | ||||
| 1. **One Skill, one purpose**: Don't create mega-Skills | ||||
| 2. **Specific descriptions**: Include trigger words users will say | ||||
| 3. **Clear instructions**: Write for Claude, not humans | ||||
| 4. **Concrete examples**: Show real code, not pseudocode | ||||
| 5. **List dependencies**: Mention required packages in description | ||||
| 6. **Test with teammates**: Verify activation and clarity | ||||
| 7. **Version your Skills**: Document changes in content | ||||
| 8. **Use progressive disclosure**: Put advanced details in separate files | ||||
|  | ||||
| ## Validation checklist | ||||
|  | ||||
| Before finalizing a Skill, verify: | ||||
|  | ||||
| - [ ] Name is lowercase, hyphens only, max 64 chars | ||||
| - [ ] Description is specific and < 1024 chars | ||||
| - [ ] Description includes "what" and "when" | ||||
| - [ ] YAML frontmatter is valid | ||||
| - [ ] Instructions are step-by-step | ||||
| - [ ] Examples are concrete and realistic | ||||
| - [ ] Dependencies are documented | ||||
| - [ ] File paths use forward slashes | ||||
| - [ ] Skill activates on relevant queries | ||||
| - [ ] Claude follows instructions correctly | ||||
|  | ||||
| ## Troubleshooting | ||||
|  | ||||
| **Skill doesn't activate**: | ||||
| - Make description more specific with trigger words | ||||
| - Include file types and operations in description | ||||
| - Add "Use when..." clause with user phrases | ||||
|  | ||||
| **Multiple Skills conflict**: | ||||
| - Make descriptions more distinct | ||||
| - Use different trigger words | ||||
| - Narrow the scope of each Skill | ||||
|  | ||||
| **Skill has errors**: | ||||
| - Check YAML syntax (no tabs, proper indentation) | ||||
| - Verify file paths (use forward slashes) | ||||
| - Ensure scripts have execute permissions | ||||
| - List all dependencies | ||||
|  | ||||
| ## Examples | ||||
|  | ||||
| See the documentation for complete examples: | ||||
| - Simple single-file Skill (commit-helper) | ||||
| - Skill with tool permissions (code-reviewer) | ||||
| - Multi-file Skill (pdf-processing) | ||||
|  | ||||
| ## Output format | ||||
|  | ||||
| When creating a Skill, I will: | ||||
|  | ||||
| 1. Ask clarifying questions about scope and requirements | ||||
| 2. Suggest a Skill name and location | ||||
| 3. Create the SKILL.md file with proper frontmatter | ||||
| 4. Include clear instructions and examples | ||||
| 5. Add supporting files if needed | ||||
| 6. Provide testing instructions | ||||
| 7. Validate against all requirements | ||||
|  | ||||
| The result will be a complete, working Skill that follows all best practices and validation rules. | ||||
							
								
								
									

.github/actions/setup-rocm/action.yml (7 changed lines, vendored)

							| @ -124,3 +124,10 @@ runs: | ||||
|       id: login-ecr | ||||
|       continue-on-error: true | ||||
|       uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 | ||||
|  | ||||
|     - name: Preserve github env variables for use in docker | ||||
|       shell: bash | ||||
|       run: | | ||||
|         env | grep '^GITHUB' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}" | ||||
|         env | grep '^CI' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}" | ||||
|         env | grep '^RUNNER' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}" | ||||
|  | ||||
							
								
								
									

.github/ci_commit_pins/xla.txt (2 changed lines, vendored)

							| @ -1 +1 @@ | ||||
| 0fa6e3129e61143224663e1ec67980d12b7ec4eb | ||||
| df6798dfb931ce7c7fe5bed2447cd1092a5981af | ||||
|  | ||||
							
								
								
									

.github/ci_configs/vllm/Dockerfile (5 changed lines, vendored)

							| @ -283,6 +283,9 @@ RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \ | ||||
|         uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \ | ||||
|     fi | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system --pre apache-tvm-ffi==0.1.0b15 | ||||
|  | ||||
| # Install the vllm wheel from previous stage | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system /wheels/vllm/*.whl --verbose | ||||
| @ -295,6 +298,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
|  | ||||
| # TODO(elainewy): remove this once vllm commit is updated, and install flashinfer from pip | ||||
| # see https://github.com/pytorch/pytorch/pull/165274#issuecomment-3408531784 | ||||
| ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git" | ||||
| ARG FLASHINFER_GIT_REF="v0.2.14.post1" | ||||
|  | ||||
|  | ||||
							
								
								
									

.github/label_to_label.yml (9 changed lines, vendored)

							| @ -15,6 +15,11 @@ | ||||
|   - "module: reinplacing" | ||||
|   then: | ||||
|   - "module: pt2-dispatcher" | ||||
| - any: | ||||
|   - "vllm-compile" | ||||
|   then: | ||||
|   - "module: vllm" | ||||
|   - "oncall: pt2" | ||||
| - any: | ||||
|   - "module: vmap" | ||||
|   then: | ||||
| @ -27,10 +32,6 @@ | ||||
|   - "module: pt2 optimizer" | ||||
|   then: | ||||
|   - "module: dynamo" | ||||
| - any: | ||||
|   - "module: flex attention" | ||||
|   then: | ||||
|   - "module: higher order operators" | ||||
| - any: | ||||
|   - "module: aotinductor" | ||||
|   then: | ||||
|  | ||||
							
								
								
									

.github/scripts/generate_binary_build_matrix.py (22 changed lines, vendored)

							| @ -22,7 +22,7 @@ CUDA_ARCHES_FULL_VERSION = { | ||||
|     "12.6": "12.6.3", | ||||
|     "12.8": "12.8.1", | ||||
|     "12.9": "12.9.1", | ||||
|     "13.0": "13.0.0", | ||||
|     "13.0": "13.0.2", | ||||
| } | ||||
| CUDA_ARCHES_CUDNN_VERSION = { | ||||
|     "12.6": "9", | ||||
| @ -96,21 +96,21 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { | ||||
|         "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "13.0": ( | ||||
|         "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | " | ||||
|         "nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | " | ||||
|         "nvidia-cublas==13.0.0.19; platform_system == 'Linux' | " | ||||
|         "nvidia-cufft==12.0.0.15; platform_system == 'Linux' | " | ||||
|         "nvidia-cublas==13.1.0.3; platform_system == 'Linux' | " | ||||
|         "nvidia-cufft==12.0.0.61; platform_system == 'Linux' | " | ||||
|         "nvidia-curand==10.4.0.35; platform_system == 'Linux' | " | ||||
|         "nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | " | ||||
|         "nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | " | ||||
|         "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | " | ||||
|         "nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | " | ||||
|         "nvidia-nvtx==13.0.39; platform_system == 'Linux' | " | ||||
|         "nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile==1.15.0.42; platform_system == 'Linux'" | ||||
|         "nvidia-nvtx==13.0.85; platform_system == 'Linux' | " | ||||
|         "nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile==1.15.1.6; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "xpu": ( | ||||
|         "intel-cmplr-lib-rt==2025.2.1 | " | ||||
|  | ||||
							
								
								
									

.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (14 changed lines, generated, vendored)

							| @ -270,7 +270,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -519,7 +519,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -768,7 +768,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1017,7 +1017,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1266,7 +1266,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1515,7 +1515,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1764,7 +1764,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
							
								
								
									

.github/workflows/generated-linux-binary-manywheel-nightly.yml (14 changed lines, generated, vendored)

							| @ -325,7 +325,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_10-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda13_0-test:  # Testing | ||||
| @ -991,7 +991,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_11-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda13_0-test:  # Testing | ||||
| @ -1657,7 +1657,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_12-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda13_0-test:  # Testing | ||||
| @ -2323,7 +2323,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda13_0-test:  # Testing | ||||
| @ -2989,7 +2989,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13t-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda13_0-test:  # Testing | ||||
| @ -3655,7 +3655,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda13_0-test:  # Testing | ||||
| @ -4321,7 +4321,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14t-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda13_0-test:  # Testing | ||||
|  | ||||
							
								
								
									
.github/workflows/inductor-periodic.yml (1 change; vendored)
							| @ -88,7 +88,6 @@ jobs: | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "dynamo_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|  | ||||
							
								
								
									
.github/workflows/pull.yml (3 changes; vendored)
							| @ -347,7 +347,8 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       sync-tag: linux-xpu-n-build | ||||
|       # This should sync with the build in xpu.yml but xpu uses a larger runner | ||||
|       # sync-tag: linux-xpu-n-build | ||||
|       runner_prefix: ${{ needs.get-label-type.outputs.label-type }} | ||||
|       build-environment: linux-jammy-xpu-n-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3 | ||||
|  | ||||
							
								
								
									
.github/workflows/rocm-mi300.yml (1 change; vendored)
							| @ -45,7 +45,6 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-noble-rocm-py3.12-mi300 | ||||
|       docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|  | ||||
							
								
								
									
.github/workflows/rocm-mi355.yml (1 change; vendored)
							| @ -42,7 +42,6 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-noble-rocm-py3.12-mi355 | ||||
|       docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|  | ||||
							
								
								
									
.github/workflows/rocm-navi31.yml (12 changes; vendored)
							| @ -26,11 +26,23 @@ jobs: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|  | ||||
							
								
								
									
.github/workflows/rocm.yml (12 changes; vendored)
							| @ -26,11 +26,23 @@ jobs: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|  | ||||
| @ -833,8 +833,7 @@ exclude_patterns = [ | ||||
| command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/grep_linter.py', | ||||
|     '--pattern=cudaSetDevice(', | ||||
|     '--pattern=cudaGetDevice(', | ||||
|     '--pattern=(cudaSetDevice|cudaGetDevice)\\(', | ||||
|     '--linter-name=RAWCUDADEVICE', | ||||
|     '--error-name=raw CUDA API usage', | ||||
|     """--error-description=\ | ||||
| @ -1138,11 +1137,8 @@ command = [ | ||||
| [[linter]] | ||||
| code = 'WORKFLOWSYNC' | ||||
| include_patterns = [ | ||||
|     '.github/workflows/pull.yml', | ||||
|     '.github/workflows/trunk.yml', | ||||
|     '.github/workflows/periodic.yml', | ||||
|     '.github/workflows/mac-mps.yml', | ||||
|     '.github/workflows/slow.yml', | ||||
|     '.github/workflows/*.yml', | ||||
|     '.github/workflows/*.yaml', | ||||
| ] | ||||
| command = [ | ||||
|     'python3', | ||||
|  | ||||
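A quick sanity check of the RAWCUDADEVICE change above: the two literal patterns are folded into one alternation, with the trailing parenthesis escaped so it matches a literal '(' rather than opening another group. The sketch below models that with std::regex; the sample lines are made up, and the real linter runs through the Python grep adapter named in the config, so this is only an illustration of the pattern, not of the linter itself.

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
      // Equivalent of the merged linter pattern: either raw CUDA device call,
      // followed by a literal '(' (escaped because '(' is a regex metacharacter).
      const std::regex raw_cuda_device(R"((cudaSetDevice|cudaGetDevice)\()");
      for (const std::string line : {"cudaSetDevice(0);",
                                     "cudaGetDevice(&device);",
                                     "c10::cuda::set_device(0);"}) {
        std::cout << line << " -> "
                  << (std::regex_search(line, raw_cuda_device) ? "flagged" : "ok")
                  << '\n';
      }
    }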
| @ -31,9 +31,9 @@ Be careful when running untrusted models. This classification includes models cr | ||||
|  | ||||
| **Prefer to execute untrusted models within a secure, isolated environment such as a sandbox** (e.g., containers, virtual machines). This helps protect your system from potentially malicious code. You can find further details and instructions in [this page](https://developers.google.com/code-sandboxing). | ||||
|  | ||||
| **Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) with `weights_only=True` is also secure to our knowledge even though it offers significantly larger surface of attack. Loading un-trusted checkpoint with `weights_only=False` MUST never be done. | ||||
|  | ||||
| **Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) has a significantly larger surface of attack but is more flexible in what it can serialize. See the documentation for more details. | ||||
|  | ||||
| Even for more secure serialization formats, unexpected inputs to the downstream system can cause diverse security threats (e.g. denial of service, out of bound reads/writes) and thus we recommend extensive validation of any untrusted inputs. | ||||
|  | ||||
| Important Note: The trustworthiness of a model is not binary. You must always determine the proper level of caution depending on the specific model and how it matches your use case and risk tolerance. | ||||
|  | ||||
|  | ||||
| @ -38,7 +38,7 @@ set_bool(AT_HIPSPARSELT_ENABLED CAFFE2_USE_HIPSPARSELT) | ||||
|  | ||||
| configure_file(Config.h.in "${CMAKE_CURRENT_SOURCE_DIR}/Config.h") | ||||
| # TODO: Do not generate CUDAConfig.h for ROCm BUILDS | ||||
| # At the moment, `jit_macors.h` include CUDAConfig.h for both CUDA and HIP builds | ||||
| # At the moment, `jit_macros.h` include CUDAConfig.h for both CUDA and HIP builds | ||||
| if(USE_CUDA OR USE_ROCM) | ||||
|   configure_file(cuda/CUDAConfig.h.in "${CMAKE_CURRENT_SOURCE_DIR}/cuda/CUDAConfig.h") | ||||
| endif() | ||||
|  | ||||
| @ -122,7 +122,7 @@ void FunctionalTensorWrapper::freeze_storage() const { | ||||
| //          |   have their own storages, but backends like functorch      | | ||||
| //         \/   are allowed to re-alias underneath the pass               \/ | ||||
| // . - - - - - - - - - - - - - .                             . - - - - - - - - - - - - - - - . | ||||
| // |    underyling_storage     |                             |      underyling_storage       | | ||||
| // |    underlying_storage     |                             |      underlying_storage       | | ||||
| // . - - - - - - - - - - - - - .                             . - - - - - - - - - - - - - - - . | ||||
| // | ||||
| // This constructor is only used by view ops. | ||||
|  | ||||
| @ -1534,7 +1534,7 @@ void TensorIteratorBase::build(TensorIteratorConfig& config) { | ||||
|  | ||||
|   // XLA and lazy tensors don't have storage, so they don't have an underlying data pointer. | ||||
|   // Nothing beyond this point is important for meta functions, so it's fine to exit early here. | ||||
|   // Extend the condition to MAIA tesnors as MAIA tensors also don't have storage. | ||||
|   // Extend the condition to MAIA tensors as MAIA tensors also don't have storage. | ||||
|   if (privateuse1_without_storage  || | ||||
|       common_device_.type() == DeviceType::XLA  || | ||||
|       common_device_.type() == DeviceType::IPU  || | ||||
|  | ||||
| @ -94,11 +94,11 @@ struct PinnedReserveSegment { | ||||
| struct TORCH_API HostStats { | ||||
|   // COUNT: total allocations (active) | ||||
|   Stat active_requests; | ||||
|   // SUM: bytes allocated/reserved by this memory alocator. (active) | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. (active) | ||||
|   Stat active_bytes; | ||||
|   // COUNT: total allocations (active + free) | ||||
|   Stat allocations; | ||||
|   // SUM: bytes allocated/reserved by this memory alocator. This accounts | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. This accounts | ||||
|   // for both free and in-use blocks. | ||||
|   Stat allocated_bytes; | ||||
|  | ||||
| @ -127,7 +127,7 @@ struct alignas(hardware_destructive_interference_size) HostStatsStaged { | ||||
|   // COUNT: total allocations (active + free) | ||||
|   // LOCK: access to this stat is protected by the allocator's blocks_mutex_ | ||||
|   Stat allocations; | ||||
|   // SUM: bytes allocated/reserved by this memory alocator. This accounts | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. This accounts | ||||
|   // for both free and in-use blocks. | ||||
|   Stat allocated_bytes; | ||||
|   // COUNT: number of allocations per bucket (active) | ||||
| @ -455,7 +455,7 @@ struct CachingHostAllocatorImpl { | ||||
|   } | ||||
|  | ||||
|   void resetAccumulatedStats() { | ||||
|     // Reseting accumulated memory stats requires concurrently holding both the | ||||
|     // Resetting accumulated memory stats requires concurrently holding both the | ||||
|     // free list mutexes and the blocks mutex. Previously, this was only done in | ||||
|     // empty_cache function. | ||||
|     for (size_t i = 0; i < free_list_.size(); ++i) { | ||||
| @ -482,7 +482,7 @@ struct CachingHostAllocatorImpl { | ||||
|   } | ||||
|  | ||||
|   void resetPeakStats() { | ||||
|     // Reseting peak memory stats requires concurrently holding both the | ||||
|     // Resetting peak memory stats requires concurrently holding both the | ||||
|     // free list mutexes and the blocks mutex. Previously, this was only done in | ||||
|     // empty_cache function. | ||||
|     for (size_t i = 0; i < free_list_.size(); ++i) { | ||||
|  | ||||
| @ -109,6 +109,10 @@ TORCH_LIBRARY_IMPL(_, AutogradHPU, m) { | ||||
|   m.fallback(AUTOGRAD_FALLBACK); | ||||
| } | ||||
|  | ||||
| TORCH_LIBRARY_IMPL(_, AutogradPrivateUse1, m) { | ||||
|   m.fallback(AUTOGRAD_FALLBACK); | ||||
| } | ||||
|  | ||||
| #undef AUTOGRAD_FALLBACK | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| @ -148,7 +148,7 @@ struct TORCH_API ClassType : public NamedType { | ||||
|  | ||||
|   void checkNotExist(const std::string& name, const std::string& what) const; | ||||
|  | ||||
|   // Attributes are stored in a specific slot at runtime for effiency. | ||||
|   // Attributes are stored in a specific slot at runtime for efficiency. | ||||
|   // When emitting instructions we specify the slot so that attribute access is | ||||
|   // a constant lookup | ||||
|   std::optional<size_t> findAttributeSlot(const std::string& name) const { | ||||
| @ -412,7 +412,7 @@ struct TORCH_API ClassType : public NamedType { | ||||
|   // Holds method attributes | ||||
|   std::weak_ptr<CompilationUnit> compilation_unit_; | ||||
|  | ||||
|   // Holds all atrributes, attribute details are found on ClassAttribute | ||||
|   // Holds all attributes, attribute details are found on ClassAttribute | ||||
|   std::vector<ClassAttribute> attributes_; | ||||
|   // Construct mirroring attributes_, only around due to the fact that `containedTypes()` method returns an ArrayRef. | ||||
|   // Never fill this without using the appropriate provideNewClassAttribute method | ||||
|  | ||||
| @ -442,11 +442,17 @@ RegistrationHandleRAII Dispatcher::registerFallback(DispatchKey dispatchKey, Ker | ||||
|  | ||||
|   auto idx = getDispatchTableIndexForDispatchKey(dispatchKey); | ||||
|   TORCH_CHECK(idx >= 0 && static_cast<uint64_t>(idx) < backendFallbackKernels_.size(), "idx=", idx); | ||||
|   // NB: Perserve BC for registering fallback for AutogradPrivateUse1 multiple time, | ||||
|   // refer to https://github.com/pytorch/pytorch/issues/163979 for more informations. | ||||
|   TORCH_CHECK( | ||||
|     !backendFallbackKernels_[idx].kernel.isValid(), | ||||
|     "Tried to register multiple backend fallbacks for the same dispatch key ", dispatchKey, "; previous registration ", | ||||
|     backendFallbackKernels_[idx].debug, ", new registration ", debug | ||||
|   ); | ||||
|       dispatchKey == DispatchKey::AutogradPrivateUse1 || | ||||
|           !backendFallbackKernels_[idx].kernel.isValid(), | ||||
|       "Tried to register multiple backend fallbacks for the same dispatch key ", | ||||
|       dispatchKey, | ||||
|       "; previous registration ", | ||||
|       backendFallbackKernels_[idx].debug, | ||||
|       ", new registration ", | ||||
|       debug); | ||||
|   // NB: inferred function schema is always nullptr for fallbacks, as fallbacks | ||||
|   // cannot be unboxed | ||||
|   backendFallbackKernels_[idx] = impl::AnnotatedKernel(std::move(kernel), nullptr, std::move(debug)); | ||||
| @ -531,7 +537,7 @@ int64_t Dispatcher::sequenceNumberForRunningRecordFunction(DispatchKey dispatchK | ||||
|  | ||||
|   // Note: this records a sequence number for both Autograd keys, and for | ||||
|   // non-Autograd keys where the dispatchKeySet still contains an autograd key. | ||||
|   // This means that we might collect the same sequence nubmer two different | ||||
|   // This means that we might collect the same sequence number two different | ||||
|   // events if they all occurred above Autograd and still had the Autograd | ||||
|   // dispatch key in the dispatch key set. | ||||
|   // However, this usually doesn't happen: normally the first call will | ||||
|  | ||||
| @ -585,7 +585,7 @@ class TORCH_API OperatorHandle { | ||||
|  | ||||
|   // We need to store this iterator in order to make | ||||
|   // Dispatcher::cleanup() fast -- it runs a lot on program | ||||
|   // termination (and presuambly library unloading). | ||||
|   // termination (and presumably library unloading). | ||||
|   std::list<Dispatcher::OperatorDef>::iterator operatorIterator_; | ||||
| }; | ||||
|  | ||||
|  | ||||
| @ -365,7 +365,7 @@ std::pair<const AnnotatedKernel&, const char*> OperatorEntry::computeDispatchTab | ||||
|   //          For autograd keys, we only use kernel from CompositeImplicitAutograd when there's no direct registration | ||||
|   //          to its corresponding backend key or CompositeExplicitAutograd. See Note [CompositeExplicitAutograd and CompositeImplicitAutograd]. | ||||
|   //          For AutogradOther, we eagerly return ambiguousAutogradOtherKernel() if there's registration to any of | ||||
|   //          its backends and ask backend extender to request a decicated Autograd key for the backend. | ||||
|   //          its backends and ask backend extender to request a dedicated Autograd key for the backend. | ||||
|   //          See Note [Ambiguity in AutogradOther kernel] for more details. | ||||
|   //          A CompositeExplicitAutograd kernel prevents CompositeImplicitAutograd kernel being used for Autograd keys, but it doesn't | ||||
|   //          cause confusion for AutogradOther. It's pretty straightforward to use Autograd (if available) | ||||
|  | ||||
| @ -261,7 +261,7 @@ std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { | ||||
|     // | ||||
|     // There are 2 cases | ||||
|     // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'. | ||||
|     // without the extra parenthesis, the c++ schem parser can not parse it. | ||||
|     // without the extra parenthesis, the c++ scheme parser can not parse it. | ||||
|     // 2. something like '-> ((str, str))'. Need extra parenthesis so the return | ||||
|     // type is a single tuple rather than two strings. | ||||
|     // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about | ||||
|  | ||||
| @ -1176,7 +1176,7 @@ struct TORCH_API IValue final { | ||||
|   using HashIdentityIValueMap = | ||||
|       std::unordered_map<IValue, IValue, HashIdentityIValue, CompIdentityIValues>; | ||||
|  | ||||
|   // Chechs if this and rhs has a subvalues in common. | ||||
|   // Checks if this and rhs has a subvalues in common. | ||||
|   // [t1,t2] and [t2, t3] returns true. | ||||
|   bool overlaps(const IValue& rhs) const; | ||||
|  | ||||
|  | ||||
| @ -1501,7 +1501,7 @@ struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target { | ||||
|   // However, the CompilationUnit holds ownership of the type's graphs, so | ||||
|   // inserting a constant object into a Graph would create a reference cycle if | ||||
|   // that constant object held a shared_ptr to its CU. For these objects we | ||||
|   // instatiate them with non-owning references to its CU | ||||
|   // instantiate them with non-owning references to its CU | ||||
|   Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) { | ||||
|     slots_.resize(numSlots); | ||||
|   } | ||||
|  | ||||
| @ -373,7 +373,7 @@ struct TORCH_API SymbolicShape { | ||||
|   // Unranked shape constructor. | ||||
|   SymbolicShape() : dims_(std::nullopt) {} | ||||
|  | ||||
|   // Known rank but unknown dimentions. | ||||
|   // Known rank but unknown dimensions. | ||||
|   SymbolicShape(std::optional<size_t> rank) : dims_(std::nullopt) { | ||||
|     if(!rank) { | ||||
|       return; | ||||
| @ -884,9 +884,9 @@ struct TORCH_API ListType | ||||
|  | ||||
|   // global singleton | ||||
|   // Given an inner type T and an identifier, | ||||
|   // this function wil return the global singleton type pointer | ||||
|   // this function will return the global singleton type pointer | ||||
|   // the type List<T>. | ||||
|   // The extra "identifier" argument is needed beccause we have multiple container types | ||||
|   // The extra "identifier" argument is needed because we have multiple container types | ||||
|   // that all re-use this function (List<T>, array<T, N>, etc.) | ||||
|   static TypePtr get(const std::string& identifier, TypePtr inner); | ||||
|  | ||||
|  | ||||
| @ -185,11 +185,11 @@ struct TORCH_API Type { | ||||
|         : repr_(nullptr) {} | ||||
|  | ||||
|     /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<T> p) | ||||
|         : repr_(p) {} | ||||
|         : repr_(makeSingletonSharedPtr(p.get())) {} | ||||
|  | ||||
|     template <typename U, std::enable_if_t<std::is_convertible_v<U*, T*>, bool> = true> | ||||
|     /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<U> p) | ||||
|         : repr_(SingletonTypePtr<T>(p.get())) {} | ||||
|         : repr_(makeSingletonSharedPtr(static_cast<T*>(p.get()))) {} | ||||
|  | ||||
|  | ||||
|     // We need to support construction from T* for pybind. The problem | ||||
| @ -202,8 +202,8 @@ struct TORCH_API Type { | ||||
|     // Case 2: if T is exactly Type, we need to do a dynamic_cast to | ||||
|     // check if it's a SharedType and do the right thing. | ||||
|     // | ||||
|     // Case 3: Otherwise, T is not a SharedType. (debug-check this | ||||
|     // assumption!) Use a singleton pointer. | ||||
|     // Case 3: Otherwise, T is not a SharedType. Use a singleton | ||||
|     // pointer. | ||||
|  | ||||
|     template <typename U = T, std::enable_if_t<std::is_base_of_v<SharedType, U>, bool> = true> | ||||
|     /* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast<typename detail::as_shared_type<U>::type>(p)->shared_from_this()) {} | ||||
| @ -211,15 +211,15 @@ struct TORCH_API Type { | ||||
|     template <typename U = T, std::enable_if_t<std::is_same_v<Type, U>, bool> = true> | ||||
|     /* implicit */ SingletonOrSharedTypePtr(T* p) { | ||||
|       if (auto* shared_p = dynamic_cast<typename detail::as_shared_type<U>::type>(p)) { | ||||
|         repr_ = Repr(shared_p->shared_from_this()); | ||||
|         repr_ = shared_p->shared_from_this(); | ||||
|       } else { | ||||
|         repr_ = Repr(p); | ||||
|         repr_ = makeSingletonSharedPtr(p); | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     template <typename U = T, std::enable_if_t<!std::is_same_v<Type, U> && !std::is_base_of_v<SharedType, U>, bool> = true> | ||||
|     /* implicit */ SingletonOrSharedTypePtr(T* p) | ||||
|         : repr_(p) { | ||||
|         : repr_(makeSingletonSharedPtr(p)) { | ||||
|       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast<typename detail::as_shared_type<U>::type>(p) == nullptr); | ||||
|     } | ||||
|  | ||||
| @ -230,19 +230,19 @@ struct TORCH_API Type { | ||||
|     ~SingletonOrSharedTypePtr() = default; | ||||
|  | ||||
|     T* get() const { | ||||
|       return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast<T*>(repr_.rawRepr().first); | ||||
|       return repr_.get(); | ||||
|     } | ||||
|  | ||||
|     operator bool() const { | ||||
|       return repr_.isNonNull(); | ||||
|       return repr_ != nullptr; | ||||
|     } | ||||
|  | ||||
|     bool operator==(std::nullptr_t) const { | ||||
|       return !repr_.isNonNull(); | ||||
|       return repr_ == nullptr; | ||||
|     } | ||||
|  | ||||
|     bool operator!=(std::nullptr_t) const { | ||||
|       return repr_.isNonNull(); | ||||
|       return repr_ != nullptr; | ||||
|     } | ||||
|  | ||||
|     template <typename U = T, std::enable_if_t<!std::is_same_v<std::remove_const_t<U>, void>, bool> = true> | ||||
| @ -255,138 +255,14 @@ struct TORCH_API Type { | ||||
|     } | ||||
|  | ||||
|   private: | ||||
|     // NOTE: SharedPtrWrapper exists to work around a baffling bug in | ||||
|     // nvcc; see comment in destroy() below. | ||||
|     struct SharedPtrWrapper { | ||||
|       SharedPtrWrapper(std::shared_ptr<T> &&x) | ||||
|           : repr_(std::move(x)) {} | ||||
|       std::shared_ptr<T> repr_; | ||||
|     }; | ||||
|     union Repr { | ||||
|       Repr() : Repr(nullptr) {} | ||||
|     // Use shared_ptr's aliasing constructor to create a non-owning pointer | ||||
|     // to a singleton. The lifetime is tied to the null shared_ptr, so there's | ||||
|     // no reference counting overhead for the singleton itself. | ||||
|     static std::shared_ptr<T> makeSingletonSharedPtr(T* ptr) { | ||||
|       return std::shared_ptr<T>(std::shared_ptr<T>(), ptr); | ||||
|     } | ||||
|  | ||||
|       explicit Repr(std::shared_ptr<T> x) | ||||
|           : shared_(std::move(x)) {} | ||||
|  | ||||
|       explicit Repr(std::nullptr_t) | ||||
|           : singletonRepr_(nullptr) {} | ||||
|  | ||||
|       explicit Repr(SingletonTypePtr<T> p) | ||||
|           : singletonRepr_(p.get()) {} | ||||
|  | ||||
|       ~Repr() { | ||||
|         destroy(); | ||||
|       } | ||||
|  | ||||
|       // NOTE: the only non-UB way to access our null state is through | ||||
|       // rawRepr(), because our copy operation doesn't preserve which | ||||
|       // union member is active for null pointers. | ||||
|       Repr(const Repr& rhs) { | ||||
|         if (rhs.isSharedAndNonNull()) { | ||||
|           new (&shared_) SharedPtrWrapper(rhs.shared_); | ||||
|         } else { | ||||
|           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first); | ||||
|           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); | ||||
|           singletonRepr_.unused_ = nullptr; | ||||
|         } | ||||
|       } | ||||
|  | ||||
|       Repr(Repr&& rhs) noexcept { | ||||
|         if (rhs.isSharedAndNonNull()) { | ||||
|           new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); | ||||
|         } else { | ||||
|           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first); | ||||
|           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); | ||||
|           singletonRepr_.unused_ = nullptr; | ||||
|         } | ||||
|       } | ||||
|  | ||||
|       Repr& operator=(const Repr& rhs) { | ||||
|         if (&rhs == this) { | ||||
|           return *this; | ||||
|         } | ||||
|         if (rhs.isSharedAndNonNull()) { | ||||
|           if (isSharedAndNonNull()) { | ||||
|             shared_ = rhs.shared_; | ||||
|           } else { | ||||
|             new (&shared_) SharedPtrWrapper(rhs.shared_); | ||||
|           } | ||||
|         } else { | ||||
|           if (isSharedAndNonNull()) { | ||||
|             destroy(); | ||||
|           } | ||||
|           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first); | ||||
|           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); | ||||
|           singletonRepr_.unused_ = nullptr; | ||||
|         } | ||||
|         return *this; | ||||
|       } | ||||
|  | ||||
|       Repr& operator=(Repr&& rhs) noexcept { | ||||
|         if (&rhs == this) { | ||||
|           return *this; | ||||
|         } | ||||
|         if (rhs.isSharedAndNonNull()) { | ||||
|           if (isSharedAndNonNull()) { | ||||
|             shared_ = std::move(rhs.shared_); | ||||
|           } else { | ||||
|             new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); | ||||
|           } | ||||
|         } else { | ||||
|           if (isSharedAndNonNull()) { | ||||
|             destroy(); | ||||
|           } | ||||
|           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first); | ||||
|           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); | ||||
|           singletonRepr_.unused_ = nullptr; | ||||
|         } | ||||
|         return *this; | ||||
|       } | ||||
|  | ||||
|       SharedPtrWrapper shared_; | ||||
|  | ||||
|       struct SingletonRepr { | ||||
|         explicit SingletonRepr(T* s) : singleton_(s) {} | ||||
|         T* singleton_; | ||||
|         void* unused_ = nullptr; | ||||
|       } singletonRepr_; | ||||
|       struct RawRepr { | ||||
|         void* first; | ||||
|         void* nullIfSingleton_; | ||||
|       }; | ||||
|  | ||||
|       // It is UB to read the singleton part of Repr if it was | ||||
|       // constructed as a shared_ptr and vice versa, but memcpying out | ||||
|       // the representation is always OK, so here's an accessor to obey | ||||
|       // the letter of the law. | ||||
|       RawRepr rawRepr() const { | ||||
|         RawRepr repr{}; | ||||
|         memcpy(&repr, reinterpret_cast<const char *>(this), sizeof(RawRepr)); | ||||
|         return repr; | ||||
|       } | ||||
|  | ||||
|       bool isNonNull() const { | ||||
|         auto repr = rawRepr(); | ||||
|         TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr); | ||||
|         return repr.first != nullptr; | ||||
|       } | ||||
|  | ||||
|       bool isSharedAndNonNull() const { | ||||
|         return rawRepr().nullIfSingleton_ != nullptr; | ||||
|       } | ||||
|  | ||||
|      private: | ||||
|       void destroy() { | ||||
|         if (isSharedAndNonNull()) { | ||||
|           // Without SharedPtrWrapper, this line would read | ||||
|           // `shared_.~shared_ptr()` and nvcc would complain with | ||||
|           // "error: expected primary-expression before '>' token" | ||||
|           // referring to the "t" in "shared_ptr". SharedPtrWrapper | ||||
|           // exists to work around this compiler bug. | ||||
|           shared_.~SharedPtrWrapper(); | ||||
|         } | ||||
|       } | ||||
|     } repr_; | ||||
|     std::shared_ptr<T> repr_; | ||||
|   }; | ||||
|  | ||||
|   using TypePtr = SingletonOrSharedTypePtr<Type>; | ||||
|  | ||||
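The jit_type_base.h change above drops the hand-rolled tagged union and stores everything in a plain std::shared_ptr, using the aliasing constructor to hold singleton type pointers without owning them. A minimal standalone sketch of that technique follows; the Widget type and the numbers are illustrative only, not from the PyTorch sources.

    #include <cassert>
    #include <memory>

    struct Widget { int id; };

    // Non-owning shared_ptr via the aliasing constructor: ownership is shared
    // with an empty shared_ptr, so there is no control block and the pointee is
    // never reference-counted or deleted through this handle.
    template <typename T>
    std::shared_ptr<T> makeSingletonSharedPtr(T* ptr) {
      return std::shared_ptr<T>(std::shared_ptr<T>(), ptr);
    }

    int main() {
      static Widget singleton{42};          // stands in for a global singleton type
      std::shared_ptr<Widget> p = makeSingletonSharedPtr(&singleton);
      assert(p && p.get() == &singleton);   // usable like an ordinary pointer
      assert(p.use_count() == 0);           // but non-owning: empty control block
      auto owned = std::make_shared<Widget>(Widget{7});
      assert(owned.use_count() == 1);       // owning pointers behave as usual
      return 0;                             // 'singleton' is not deleted here
    }

Because both the owning and the non-owning case are now just shared_ptr values, get(), operator bool, and the nullptr comparisons above reduce to ordinary shared_ptr operations.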
| @ -21,7 +21,7 @@ namespace c10 { | ||||
|  | ||||
| namespace detail { | ||||
| // The first argument of the schema might be of type DispatchKeySet, in which case we remove it. | ||||
| // We do this because every argument in a function schema is expected to be convertable | ||||
| // We do this because every argument in a function schema is expected to be convertible | ||||
| // to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. | ||||
| // See Note [Plumbing Keys Through The Dispatcher] | ||||
| template<class KernelFunctor> | ||||
|  | ||||
| @ -251,7 +251,7 @@ TEST(OperatorRegistrationTest, whenRegisteringCPUTensorType_thenCanOnlyCallUnbox | ||||
|   callOpUnboxedWithPrecomputedDispatchKeySet<void, Tensor>(*op, c10::DispatchKeySet(c10::DispatchKey::CPU), dummyTensor(c10::DispatchKey::CUDA)); | ||||
|   EXPECT_TRUE(called_kernel_cpu); | ||||
|  | ||||
|   // Ensure that disptach key from tensor is not used here. | ||||
|   // Ensure that dispatch key from tensor is not used here. | ||||
|   called_kernel_cpu = false; | ||||
|   expectThrows<c10::Error>([&] { | ||||
|     callOpUnboxedWithPrecomputedDispatchKeySet<void, Tensor>(*op, c10::DispatchKeySet(c10::DispatchKey::CUDA), dummyTensor(c10::DispatchKey::CPU)); | ||||
|  | ||||
| @ -172,7 +172,7 @@ VaryingShape<Stride> TensorType::computeStrideProps( | ||||
|   // The logic below follows what TensorIterator uses in its logic: | ||||
|   //   1. Fast_set_up is the short-cut to identify a. channels_last and | ||||
|   //      b. contiguous format, which is what we have in the below logic. | ||||
|   //   2. In more generla cases, it does best effort to preserve permutatoin. | ||||
|   //   2. In more general cases, it does best effort to preserve permutatoin. | ||||
|   if (is_channels_last_strides_2d(sizes, strides) || is_channels_last_strides_3d(sizes, strides)) { | ||||
|     // case 1.a. short cut channels last | ||||
|     std::iota(stride_indices.rbegin() + 1, stride_indices.rend() - 1, 2); | ||||
|  | ||||
| @ -104,71 +104,6 @@ class Vectorized<float> { | ||||
|     } | ||||
|     return b; | ||||
|   } | ||||
|   // Implementation is picked from | ||||
|   // https://github.com/ARM-software/ComputeLibrary/blob/v25.01/src/core/NEON/SVEMath.inl#L105 | ||||
|   inline svfloat32_t svexp_f32_z(svbool_t pg, svfloat32_t x) const { | ||||
|     const auto c1 = | ||||
|         svreinterpret_f32_u32(svdup_n_u32(0x3f7ffff6)); // x^1: 0x1.ffffecp-1f | ||||
|     const auto c2 = | ||||
|         svreinterpret_f32_u32(svdup_n_u32(0x3efffedb)); // x^2: 0x1.fffdb6p-2f | ||||
|     const auto c3 = | ||||
|         svreinterpret_f32_u32(svdup_n_u32(0x3e2aaf33)); // x^3: 0x1.555e66p-3f | ||||
|     const auto c4 = | ||||
|         svreinterpret_f32_u32(svdup_n_u32(0x3d2b9f17)); // x^4: 0x1.573e2ep-5f | ||||
|     const auto c5 = | ||||
|         svreinterpret_f32_u32(svdup_n_u32(0x3c072010)); // x^5: 0x1.0e4020p-7f | ||||
|     const auto shift = svreinterpret_f32_u32( | ||||
|         svdup_n_u32(0x4b00007f)); // 2^23 + 127 = 0x1.0000fep23f | ||||
|     const auto inv_ln2 = svreinterpret_f32_u32( | ||||
|         svdup_n_u32(0x3fb8aa3b)); // 1 / ln(2) = 0x1.715476p+0f | ||||
|     const auto neg_ln2_hi = svreinterpret_f32_u32(svdup_n_u32( | ||||
|         0xbf317200)); // -ln(2) from bits  -1 to -19: -0x1.62e400p-1f | ||||
|     const auto neg_ln2_lo = svreinterpret_f32_u32(svdup_n_u32( | ||||
|         0xb5bfbe8e)); // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f | ||||
|     const auto inf = svdup_n_f32(std::numeric_limits<float>::infinity()); | ||||
|     const auto max_input = svdup_n_f32(88.37f); // Approximately ln(2^127.5) | ||||
|     const auto zero = svdup_n_f32(0.f); | ||||
|     const auto min_input = svdup_n_f32(-86.64f); // Approximately ln(2^-125) | ||||
|     // Range reduction: | ||||
|     //   e^x = 2^n * e^r | ||||
|     // where: | ||||
|     //   n = floor(x / ln(2)) | ||||
|     //   r = x - n * ln(2) | ||||
|     // | ||||
|     // By adding x / ln(2) with 2^23 + 127 (shift): | ||||
|     //   * As FP32 fraction part only has 23-bits, the addition of 2^23 + 127 | ||||
|     //   forces decimal part | ||||
|     //     of x / ln(2) out of the result. The integer part of x / ln(2) (i.e. | ||||
|     //     n) + 127 will occupy the whole fraction part of z in FP32 format. | ||||
|     //     Subtracting 2^23 + 127 (shift) from z will result in the integer part | ||||
|     //     of x / ln(2) (i.e. n) because the decimal part has been pushed out | ||||
|     //     and lost. | ||||
|     //   * The addition of 127 makes the FP32 fraction part of z ready to be | ||||
|     //   used as the exponent | ||||
|     //     in FP32 format. Left shifting z by 23 bits will result in 2^n. | ||||
|     const auto z = svmla_f32_z(pg, shift, x, inv_ln2); | ||||
|     const auto n = svsub_f32_z(pg, z, shift); | ||||
|     const auto scale = svreinterpret_f32_u32( | ||||
|         svlsl_n_u32_z(pg, svreinterpret_u32_f32(z), 23)); // 2^n | ||||
|     // The calculation of n * ln(2) is done using 2 steps to achieve accuracy | ||||
|     // beyond FP32. This outperforms longer Taylor series (3-4 tabs) both in | ||||
|     // term of accuracy and performance. | ||||
|     const auto r_hi = svmla_f32_z(pg, x, n, neg_ln2_hi); | ||||
|     const auto r = svmla_f32_z(pg, r_hi, n, neg_ln2_lo); | ||||
|     // Compute the truncated Taylor series of e^r. | ||||
|     //   poly = scale * (1 + c1 * r + c2 * r^2 + c3 * r^3 + c4 * r^4 + c5 * r^5) | ||||
|     const auto r2 = svmul_f32_z(pg, r, r); | ||||
|     const auto p1 = svmul_f32_z(pg, c1, r); | ||||
|     const auto p23 = svmla_f32_z(pg, c2, c3, r); | ||||
|     const auto p45 = svmla_f32_z(pg, c4, c5, r); | ||||
|     const auto p2345 = svmla_f32_z(pg, p23, p45, r2); | ||||
|     const auto p12345 = svmla_f32_z(pg, p1, p2345, r2); | ||||
|     auto poly = svmla_f32_z(pg, scale, p12345, scale); | ||||
|     // Handle underflow and overflow. | ||||
|     poly = svsel_f32(svcmplt_f32(pg, x, min_input), zero, poly); | ||||
|     poly = svsel_f32(svcmpgt_f32(pg, x, max_input), inf, poly); | ||||
|     return poly; | ||||
|   } | ||||
|   static Vectorized<float> loadu(const void* ptr, int64_t count = size()) { | ||||
|     if (count == size()) | ||||
|       return svld1_f32(ptrue, reinterpret_cast<const float*>(ptr)); | ||||
| @ -313,11 +248,41 @@ class Vectorized<float> { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<float>(Sleef_expm1fx_u10sve(values)), map(std::expm1)); | ||||
|   } | ||||
|   // Implementation copied from Arm Optimized Routines: | ||||
|   // https://github.com/ARM-software/optimized-routines/blob/master/math/aarch64/sve/expf.c | ||||
|   Vectorized<float> exp_u20() const { | ||||
|     return exp(); | ||||
|     // special case to handle special inputs that are too large or too small | ||||
|     // i.e. where there's at least one element x, s.t. |x| >= 87.3... | ||||
|     svbool_t is_special_case = svacgt(svptrue_b32(), values, 0x1.5d5e2ap+6f); | ||||
|     if (svptest_any(svptrue_b32(), is_special_case)) { | ||||
|       return exp(); | ||||
|     } | ||||
|     const svfloat32_t ln2_hi = svdup_n_f32(0x1.62e4p-1f); | ||||
|     const svfloat32_t ln2_lo = svdup_n_f32(0x1.7f7d1cp-20f); | ||||
|     const svfloat32_t c1 = svdup_n_f32(0.5f); | ||||
|     const svfloat32_t inv_ln2 = svdup_n_f32(0x1.715476p+0f); | ||||
|  | ||||
|     const float shift = 0x1.803f8p17f; | ||||
|  | ||||
|     /* n = round(x/(ln2/N)).  */ | ||||
|     svfloat32_t z = svmad_x(svptrue_b32(), inv_ln2, values, shift); | ||||
|     svfloat32_t n = svsub_x(svptrue_b32(), z, shift); | ||||
|  | ||||
|     /* r = x - n*ln2/N.  */ | ||||
|     svfloat32_t r = values; | ||||
|     r = svmls_x(svptrue_b32(), r, n, ln2_hi); | ||||
|     r = svmls_x(svptrue_b32(), r, n, ln2_lo); | ||||
|  | ||||
|     /* scale = 2^(n/N).  */ | ||||
|     svfloat32_t scale = svexpa(svreinterpret_u32(z)); | ||||
|  | ||||
|     /* poly(r) = exp(r) - 1 ~= r + 0.5 r^2.  */ | ||||
|     svfloat32_t r2 = svmul_x(svptrue_b32(), r, r); | ||||
|     svfloat32_t poly = svmla_x(svptrue_b32(), r, r2, c1); | ||||
|     return svmla_x(svptrue_b32(), scale, scale, poly); | ||||
|   } | ||||
|   Vectorized<float> fexp_u20() const { | ||||
|     return exp(); | ||||
|     return exp_u20(); | ||||
|   } | ||||
|   Vectorized<float> fmod(const Vectorized<float>& q) const {USE_SLEEF( | ||||
|       { return Vectorized<float>(Sleef_fmodfx_sve(values, q)); }, | ||||
| @ -453,9 +418,11 @@ class Vectorized<float> { | ||||
|         ptrue, svmax_f32_z(ptrue, values, CONST_MIN_TANH), CONST_MAX_TANH); | ||||
|  | ||||
|     // Step 2: Calculate exp(2 * x), where x is the clamped value. | ||||
|     // svmul_f32_z computes 2 * x, and svexp_f32_z computes the exponential of | ||||
|     // the result. | ||||
|     svfloat32_t exp2x = svexp_f32_z(ptrue, svmul_f32_z(ptrue, CONST_2, x)); | ||||
|     // svmul_f32_z computes 2 * x, and exp_u20() computes the exponential of | ||||
|     // the result (via Vectorized<float>, then auto-converts back to | ||||
|     // svfloat32_t). | ||||
|     svfloat32_t exp2x = | ||||
|         Vectorized<float>(svmul_f32_z(ptrue, CONST_2, x)).exp_u20(); | ||||
|  | ||||
|     // Step 3: Calculate the numerator of the tanh function, which is exp(2x) | ||||
|     // - 1. | ||||
|  | ||||
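The comments in the new exp_u20 above describe standard exponential range reduction: pick n near x/ln2, compute exp(x) as 2^n * exp(r) with a small remainder r, and subtract ln2 in a high and a low part so r keeps extra precision. The scalar sketch below illustrates the same structure; it reuses the kernel's hex-float constants (so it assumes C++17) and needs a longer Taylor polynomial than the SVE code, which reduces the argument further and builds the 2^(n/N) scale with the FEXPA-based svexpa instruction.

    #include <cmath>
    #include <cstdio>

    // exp(x) = 2^n * exp(r), with n = round(x / ln2) and r = x - n*ln2.
    float exp_range_reduced(float x) {
      const float inv_ln2 = 0x1.715476p+0f;   // 1 / ln(2)
      const float ln2_hi  = 0x1.62e4p-1f;     // high bits of ln(2)
      const float ln2_lo  = 0x1.7f7d1cp-20f;  // low bits of ln(2)
      float n = std::nearbyint(x * inv_ln2);
      float r = std::fma(-n, ln2_hi, x);      // r = x - n*ln2, done in two steps
      r = std::fma(-n, ln2_lo, r);            // to keep extra precision in r
      // Truncated Taylor series of exp(r) for |r| <= ln2/2; the vector kernel
      // gets away with r + 0.5*r^2 because its reduced argument is much smaller.
      float poly = 1.0f +
          r * (1.0f + r * (0.5f + r * (1.0f / 6 + r * (1.0f / 24 + r * (1.0f / 120)))));
      return std::ldexp(poly, static_cast<int>(n));  // scale by 2^n
    }

    int main() {
      for (float x : {-5.0f, -0.1f, 0.0f, 1.0f, 10.0f}) {
        std::printf("x = %6.2f  sketch = %.6g  std::exp = %.6g\n",
                    x, exp_range_reduced(x), std::exp(x));
      }
    }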
| @ -6,6 +6,7 @@ | ||||
| #ifdef __aarch64__ | ||||
| #if !defined(CPU_CAPABILITY_SVE) | ||||
| #include <ATen/cpu/vec/vec128/vec128_bfloat16_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_double_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_half_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_int_aarch64.h> | ||||
|  | ||||
| @ -5,6 +5,129 @@ | ||||
| namespace at::vec { | ||||
| inline namespace CPU_CAPABILITY { | ||||
| #if (defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256)) | ||||
|  | ||||
| // Enable auto-vectorization for GCC-13+ and clang-17+ | ||||
| // GCC-12 has a bug: gcc.gnu.org/bugzilla/show_bug.cgi?id=117001 | ||||
| #if __GNUC__ > 12 || (defined(__clang__) && (__clang_major__ >= 17)) | ||||
|  | ||||
| template <typename from_type, typename to_type> | ||||
| inline void convertImpl( | ||||
|     const from_type* __restrict src, | ||||
|     to_type* __restrict dst, | ||||
|     int64_t n) { | ||||
|   uint64_t len = static_cast<uint64_t>(n); | ||||
|   for (uint64_t i = 0; i < len; i++) { | ||||
|     dst[i] = static_cast<to_type>(src[i]); | ||||
|   } | ||||
| } | ||||
|  | ||||
| #define CONVERT_TEMPLATE(from_type, to_type)                           \ | ||||
|   template <>                                                          \ | ||||
|   inline void convert(const from_type* src, to_type* dst, int64_t n) { \ | ||||
|     return convertImpl<from_type, to_type>(src, dst, n);               \ | ||||
|   } | ||||
|  | ||||
| CONVERT_TEMPLATE(uint8_t, uint8_t) | ||||
| CONVERT_TEMPLATE(uint8_t, int8_t) | ||||
| CONVERT_TEMPLATE(uint8_t, int16_t) | ||||
| CONVERT_TEMPLATE(uint8_t, int32_t) | ||||
| CONVERT_TEMPLATE(uint8_t, int64_t) | ||||
| CONVERT_TEMPLATE(uint8_t, float) | ||||
| CONVERT_TEMPLATE(uint8_t, double) | ||||
| CONVERT_TEMPLATE(int8_t, uint8_t) | ||||
| CONVERT_TEMPLATE(int8_t, int8_t) | ||||
| CONVERT_TEMPLATE(int8_t, int16_t) | ||||
| CONVERT_TEMPLATE(int8_t, int32_t) | ||||
| CONVERT_TEMPLATE(int8_t, int64_t) | ||||
| CONVERT_TEMPLATE(int8_t, float) | ||||
| CONVERT_TEMPLATE(int8_t, double) | ||||
| CONVERT_TEMPLATE(int16_t, uint8_t) | ||||
| CONVERT_TEMPLATE(int16_t, int8_t) | ||||
| CONVERT_TEMPLATE(int16_t, int16_t) | ||||
| CONVERT_TEMPLATE(int16_t, int32_t) | ||||
| CONVERT_TEMPLATE(int16_t, int64_t) | ||||
| CONVERT_TEMPLATE(int16_t, float) | ||||
| CONVERT_TEMPLATE(int16_t, double) | ||||
| CONVERT_TEMPLATE(int32_t, uint8_t) | ||||
| CONVERT_TEMPLATE(int32_t, int8_t) | ||||
| CONVERT_TEMPLATE(int32_t, int16_t) | ||||
| CONVERT_TEMPLATE(int32_t, int32_t) | ||||
| CONVERT_TEMPLATE(int32_t, int64_t) | ||||
| CONVERT_TEMPLATE(int32_t, float) | ||||
| CONVERT_TEMPLATE(int32_t, double) | ||||
| CONVERT_TEMPLATE(int64_t, uint8_t) | ||||
| CONVERT_TEMPLATE(int64_t, int8_t) | ||||
| CONVERT_TEMPLATE(int64_t, int16_t) | ||||
| CONVERT_TEMPLATE(int64_t, int32_t) | ||||
| CONVERT_TEMPLATE(int64_t, int64_t) | ||||
| CONVERT_TEMPLATE(int64_t, float) | ||||
| CONVERT_TEMPLATE(int64_t, double) | ||||
| CONVERT_TEMPLATE(float, uint8_t) | ||||
| CONVERT_TEMPLATE(float, int8_t) | ||||
| CONVERT_TEMPLATE(float, int16_t) | ||||
| CONVERT_TEMPLATE(float, int32_t) | ||||
| CONVERT_TEMPLATE(float, int64_t) | ||||
| CONVERT_TEMPLATE(float, float) | ||||
| CONVERT_TEMPLATE(float, double) | ||||
| CONVERT_TEMPLATE(double, uint8_t) | ||||
| CONVERT_TEMPLATE(double, int8_t) | ||||
| CONVERT_TEMPLATE(double, int16_t) | ||||
| CONVERT_TEMPLATE(double, int32_t) | ||||
| CONVERT_TEMPLATE(double, int64_t) | ||||
| CONVERT_TEMPLATE(double, float) | ||||
| CONVERT_TEMPLATE(double, double) | ||||
| #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC | ||||
|  | ||||
| #define CONVERT_FROM_FP16_TEMPLATE(to_type)                            \ | ||||
|   template <>                                                          \ | ||||
|   inline void convert(const at::Half* src, to_type* dst, int64_t n) {  \ | ||||
|     const float16_t* srcPtr = reinterpret_cast<const float16_t*>(src); \ | ||||
|     return convertImpl<float16_t, to_type>(srcPtr, dst, n);            \ | ||||
|   } | ||||
|  | ||||
| #define CONVERT_TO_FP16_TEMPLATE(from_type)                             \ | ||||
|   template <>                                                           \ | ||||
|   inline void convert(const from_type* src, at::Half* dst, int64_t n) { \ | ||||
|     float16_t* dstPtr = reinterpret_cast<float16_t*>(dst);              \ | ||||
|     return convertImpl<from_type, float16_t>(src, dstPtr, n);           \ | ||||
|   } | ||||
|  | ||||
| CONVERT_FROM_FP16_TEMPLATE(uint8_t) | ||||
| CONVERT_FROM_FP16_TEMPLATE(int8_t) | ||||
| CONVERT_FROM_FP16_TEMPLATE(int16_t) | ||||
| CONVERT_FROM_FP16_TEMPLATE(int32_t) | ||||
| CONVERT_FROM_FP16_TEMPLATE(int64_t) | ||||
| CONVERT_FROM_FP16_TEMPLATE(float16_t) | ||||
| CONVERT_FROM_FP16_TEMPLATE(float) | ||||
| CONVERT_FROM_FP16_TEMPLATE(double) | ||||
| CONVERT_TO_FP16_TEMPLATE(uint8_t) | ||||
| CONVERT_TO_FP16_TEMPLATE(int8_t) | ||||
| CONVERT_TO_FP16_TEMPLATE(int16_t) | ||||
| CONVERT_TO_FP16_TEMPLATE(int32_t) | ||||
| CONVERT_TO_FP16_TEMPLATE(int64_t) | ||||
| CONVERT_TO_FP16_TEMPLATE(float) | ||||
| CONVERT_TO_FP16_TEMPLATE(double) | ||||
| #endif | ||||
| #ifdef __ARM_FEATURE_BF16 | ||||
| CONVERT_TEMPLATE(bfloat16_t, uint8_t) | ||||
| CONVERT_TEMPLATE(bfloat16_t, int8_t) | ||||
| CONVERT_TEMPLATE(bfloat16_t, int16_t) | ||||
| CONVERT_TEMPLATE(bfloat16_t, int32_t) | ||||
| CONVERT_TEMPLATE(bfloat16_t, int64_t) | ||||
| CONVERT_TEMPLATE(bfloat16_t, bfloat16_t) | ||||
| CONVERT_TEMPLATE(bfloat16_t, float) | ||||
| CONVERT_TEMPLATE(bfloat16_t, double) | ||||
| CONVERT_TEMPLATE(uint8_t, bfloat16_t) | ||||
| CONVERT_TEMPLATE(int8_t, bfloat16_t) | ||||
| CONVERT_TEMPLATE(int16_t, bfloat16_t) | ||||
| CONVERT_TEMPLATE(int32_t, bfloat16_t) | ||||
| CONVERT_TEMPLATE(int64_t, bfloat16_t) | ||||
| CONVERT_TEMPLATE(float, bfloat16_t) | ||||
| CONVERT_TEMPLATE(double, bfloat16_t) | ||||
| #endif | ||||
|  | ||||
| #endif | ||||
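To make the macro above concrete: CONVERT_TEMPLATE(float, int32_t) expands to the specialization shown below. The #if guard at the top of this block limits these definitions to GCC newer than 12 and Clang 17 or newer, which are expected to auto-vectorize the plain convertImpl loop well enough that the hand-written NEON convert loops removed elsewhere in this diff are no longer needed.

template <>
inline void convert(const float* src, int32_t* dst, int64_t n) {
  return convertImpl<float, int32_t>(src, dst, n);
}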
|  | ||||
| template <typename src_t> | ||||
| struct VecConvert< | ||||
|     float, | ||||
|  | ||||
							
								
								
									
aten/src/ATen/cpu/vec/vec128/vec128_double_neon.h (new file, 586 lines)
							| @ -0,0 +1,586 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| #include <c10/macros/Macros.h> | ||||
| #include <c10/util/irange.h> | ||||
| #include <cmath> | ||||
|  | ||||
| namespace at::vec { | ||||
| // Note [CPU_CAPABILITY namespace] | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
| // This header, and all of its subheaders, will be compiled with | ||||
| // different architecture flags for each supported set of vector | ||||
| // intrinsics. So we need to make sure they aren't inadvertently | ||||
| // linked together. We do this by declaring objects in an `inline | ||||
| // namespace` which changes the name mangling, but can still be | ||||
| // accessed as `at::vec`. | ||||
| inline namespace CPU_CAPABILITY { | ||||
|  | ||||
| template <> | ||||
| struct is_vec_specialized_for<double> : std::bool_constant<true> {}; | ||||
|  | ||||
| template <> | ||||
| class Vectorized<double> { | ||||
|  private: | ||||
|   float64x2_t values; | ||||
|  | ||||
|  public: | ||||
|   using value_type = double; | ||||
|   using size_type = int; | ||||
|   static constexpr size_type size() { | ||||
|     return 2; | ||||
|   } | ||||
|   Vectorized() { | ||||
|     values = vdupq_n_f64(0.0); | ||||
|   } | ||||
|   Vectorized(float64x2_t v) : values(v) {} | ||||
|   Vectorized(double val) { | ||||
|     values = vdupq_n_f64(val); | ||||
|   } | ||||
|   template < | ||||
|       typename... Args, | ||||
|       typename = std::enable_if_t<(sizeof...(Args) == size())>> | ||||
|   Vectorized(Args... vals) { | ||||
|     __at_align__ double buffer[size()] = {vals...}; | ||||
|     values = vld1q_f64(buffer); | ||||
|   } | ||||
|   operator float64x2_t() const { | ||||
|     return values; | ||||
|   } | ||||
|   template <int64_t mask> | ||||
|   static Vectorized<double> blend( | ||||
|       const Vectorized<double>& a, | ||||
|       const Vectorized<double>& b) { | ||||
|     // Build a lane mask: each 64-bit lane is all ones if the corresponding | ||||
|     // bit of 'mask' is set, and all zeros otherwise. | ||||
|     uint64x2_t maskArray = { | ||||
|         (mask & 1ULL) ? 0xFFFFFFFFFFFFFFFF : 0, | ||||
|         (mask & 2ULL) ? 0xFFFFFFFFFFFFFFFF : 0}; | ||||
|     // Use BSL to select elements from b where the mask is 1, else from a | ||||
|     return vbslq_f64(maskArray, b.values, a.values); | ||||
|   } | ||||
|   static Vectorized<double> blendv( | ||||
|       const Vectorized<double>& a, | ||||
|       const Vectorized<double>& b, | ||||
|       const Vectorized<double>& mask_) { | ||||
|     return vbslq_f64(vreinterpretq_u64_f64(mask_.values), b.values, a.values); | ||||
|   } | ||||
|   template <typename step_t> | ||||
|   static Vectorized<double> arange( | ||||
|       double base = 0., | ||||
|       step_t step = static_cast<step_t>(1)) { | ||||
|     return {base, base + static_cast<double>(step)}; | ||||
|   } | ||||
|   static inline Vectorized<double> set( | ||||
|       const Vectorized<double>& a, | ||||
|       const Vectorized<double>& b, | ||||
|       int64_t count = size()) { | ||||
|     if (count == 0) { | ||||
|       return a; | ||||
|     } else if (count >= 2) { | ||||
|       return b; | ||||
|     } else { | ||||
|       float64x2_t c = {b.values[0], a.values[1]}; | ||||
|       return c; | ||||
|     } | ||||
|   } | ||||
|   static Vectorized<double> loadu(const void* ptr, int64_t count = size()) { | ||||
|     if (count == size()) { | ||||
|       return vld1q_f64(reinterpret_cast<const double*>(ptr)); | ||||
|     } else if (count == 1) { | ||||
|       float64x1_t x = vld1_f64(reinterpret_cast<const double*>(ptr)); | ||||
|       float64x1_t z = {0.0}; | ||||
|       return vcombine_f64(x, z); | ||||
|     } else { | ||||
|       return vdupq_n_f64(0.0); | ||||
|     } | ||||
|   } | ||||
|   void store(void* ptr, int64_t count = size()) const { | ||||
|     if (count == size()) { | ||||
|       vst1q_f64(reinterpret_cast<double*>(ptr), values); | ||||
|     } else if (count == 1) { | ||||
|       vst1_f64(reinterpret_cast<double*>(ptr), vget_low_f64(values)); | ||||
|     } | ||||
|   } | ||||
|   const double& operator[](int idx) const = delete; | ||||
|   double& operator[](int idx) = delete; | ||||
|   int64_t zero_mask() const { | ||||
|     // Returns an integer mask in which bit i is 1 if element i is zero | ||||
|     // and 0 otherwise. | ||||
|     uint64x2_t cmpReg = vceqzq_f64(values); | ||||
|     uint64x2_t mask = {1, 2}; | ||||
|     uint64x2_t res = vandq_u64(cmpReg, mask); | ||||
|     return res[0] | res[1]; | ||||
|   } | ||||
|   Vectorized<double> isnan() const { | ||||
|     // NaN check | ||||
|     return vreinterpretq_f64_u32( | ||||
|         vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(values, values)))); | ||||
|   } | ||||
|   bool has_inf_nan() const { | ||||
|     Vectorized<double> x = vsubq_f64(values, values); | ||||
|     float64x2_t r = x.isnan(); | ||||
|     uint64x2_t u = vreinterpretq_u64_f64(r); | ||||
|     return u[0] | u[1]; | ||||
|   } | ||||
|   Vectorized<double> map(double (*f)(double)) const { | ||||
|     float64x2_t result; | ||||
|     result[0] = f(values[0]); | ||||
|     result[1] = f(values[1]); | ||||
|     return result; | ||||
|   } | ||||
|   Vectorized<double> map2( | ||||
|       const Vectorized<double>& second, | ||||
|       double (*const f)(double, double)) const { | ||||
|     float64x2_t result; | ||||
|     result[0] = f(values[0], second.values[0]); | ||||
|     result[1] = f(values[1], second.values[1]); | ||||
|     return result; | ||||
|   } | ||||
|   Vectorized<double> abs() const { | ||||
|     return vabsq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> angle() const { | ||||
|     auto zero = Vectorized<double>(0.0); | ||||
|     auto pi = Vectorized<double>(c10::pi<double>); | ||||
|     auto tmp = blendv(zero, pi, vreinterpretq_f64_u64(vcltzq_f64(values))); | ||||
|     return blendv(tmp, *this, isnan()); | ||||
|   } | ||||
|   Vectorized<double> real() const { | ||||
|     return *this; | ||||
|   } | ||||
|   Vectorized<double> imag() const { | ||||
|     return Vectorized<double>(0.0); | ||||
|   } | ||||
|   Vectorized<double> conj() const { | ||||
|     return *this; | ||||
|   } | ||||
|   Vectorized<double> acos() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_acosd2_u10(values)), map(std::acos)); | ||||
|   } | ||||
|   Vectorized<double> acosh() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_acoshd2_u10(values)), map(std::acosh)); | ||||
|   } | ||||
|   Vectorized<double> asin() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_asind2_u10(values)), map(std::asin)); | ||||
|   } | ||||
|   Vectorized<double> asinh() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_asinhd2_u10(values)), map(std::asinh)); | ||||
|   } | ||||
|   Vectorized<double> atan() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_atand2_u10(values)), map(std::atan)); | ||||
|   } | ||||
|   Vectorized<double> atanh() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_atanhd2_u10(values)), map(std::atanh)); | ||||
|   } | ||||
|   Vectorized<double> atan2(const Vectorized<double>& b) const {USE_SLEEF( | ||||
|       { return Vectorized<double>(Sleef_atan2d2_u10(values, b)); }, | ||||
|       { | ||||
|         __at_align__ double tmp[size()]; | ||||
|         __at_align__ double tmp_b[size()]; | ||||
|         store(tmp); | ||||
|         b.store(tmp_b); | ||||
|         for (int64_t i = 0; i < size(); i++) { | ||||
|           tmp[i] = std::atan2(tmp[i], tmp_b[i]); | ||||
|         } | ||||
|         return loadu(tmp); | ||||
|       })} Vectorized<double> copysign(const Vectorized<double>& sign) const { | ||||
|       USE_SLEEF( | ||||
|           { return Vectorized<double>(Sleef_copysignd2(values, sign)); }, | ||||
|           { | ||||
|             __at_align__ double tmp[size()]; | ||||
|             __at_align__ double tmp_sign[size()]; | ||||
|             store(tmp); | ||||
|             sign.store(tmp_sign); | ||||
|             for (int64_t i = 0; i < size(); i++) { | ||||
|               tmp[i] = std::copysign(tmp[i], tmp_sign[i]); | ||||
|             } | ||||
|             return loadu(tmp); | ||||
|           })} Vectorized<double> erf() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_erfd2_u10(values)), map(std::erf)); | ||||
|   } | ||||
|   Vectorized<double> erfc() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_erfcd2_u15(values)), map(std::erfc)); | ||||
|   } | ||||
|   Vectorized<double> exp() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_expd2_u10(values)), map(std::exp)); | ||||
|   } | ||||
|   Vectorized<double> exp2() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_exp2d2_u10(values)), map(std::exp2)); | ||||
|   } | ||||
|   Vectorized<double> expm1() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_expm1d2_u10(values)), map(std::expm1)); | ||||
|   } | ||||
|   Vectorized<double> fmod(const Vectorized<double>& q) const {USE_SLEEF( | ||||
|       { return Vectorized<double>(Sleef_fmodd2(values, q)); }, | ||||
|       { | ||||
|         __at_align__ double tmp[size()]; | ||||
|         __at_align__ double tmp_q[size()]; | ||||
|         store(tmp); | ||||
|         q.store(tmp_q); | ||||
|         for (int64_t i = 0; i < size(); i++) { | ||||
|           tmp[i] = std::fmod(tmp[i], tmp_q[i]); | ||||
|         } | ||||
|         return loadu(tmp); | ||||
|       })} Vectorized<double> hypot(const Vectorized<double>& b) const { | ||||
|       USE_SLEEF( | ||||
|           { return Vectorized<double>(Sleef_hypotd2_u05(values, b)); }, | ||||
|           { | ||||
|             __at_align__ double tmp[size()]; | ||||
|             __at_align__ double tmp_b[size()]; | ||||
|             store(tmp); | ||||
|             b.store(tmp_b); | ||||
|             for (int64_t i = 0; i < size(); i++) { | ||||
|               tmp[i] = std::hypot(tmp[i], tmp_b[i]); | ||||
|             } | ||||
|             return loadu(tmp); | ||||
|           })} Vectorized<double> i0() const { | ||||
|     return map(calc_i0); | ||||
|   } | ||||
|   Vectorized<double> nextafter(const Vectorized<double>& b) const {USE_SLEEF( | ||||
|       { return Vectorized<double>(Sleef_nextafterd2(values, b)); }, | ||||
|       { | ||||
|         __at_align__ double tmp[size()]; | ||||
|         __at_align__ double tmp_b[size()]; | ||||
|         store(tmp); | ||||
|         b.store(tmp_b); | ||||
|         for (int64_t i = 0; i < size(); ++i) { | ||||
|           tmp[i] = std::nextafter(tmp[i], tmp_b[i]); | ||||
|         } | ||||
|         return loadu(tmp); | ||||
|       })} Vectorized<double> log() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_logd2_u10(values)), map(std::log)); | ||||
|   } | ||||
|   Vectorized<double> log2() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_log2d2_u10(values)), map(std::log2)); | ||||
|   } | ||||
|   Vectorized<double> log10() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_log10d2_u10(values)), map(std::log10)); | ||||
|   } | ||||
|   Vectorized<double> log1p() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_log1pd2_u10(values)), map(std::log1p)); | ||||
|   } | ||||
|   Vectorized<double> frac() const; | ||||
|   Vectorized<double> sin() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_sind2_u10(values)), map(std::sin)); | ||||
|   } | ||||
|   Vectorized<double> sinh() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_sinhd2_u10(values)), map(std::sinh)); | ||||
|   } | ||||
|   Vectorized<double> cos() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_cosd2_u10(values)), map(std::cos)); | ||||
|   } | ||||
|   Vectorized<double> cosh() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_coshd2_u10(values)), map(std::cosh)); | ||||
|   } | ||||
|   Vectorized<double> pow(const Vectorized<double>& b) const {USE_SLEEF( | ||||
|       { return Vectorized<double>(Sleef_powd2_u10(values, b)); }, | ||||
|       { | ||||
|         __at_align__ double tmp[size()]; | ||||
|         __at_align__ double tmp_b[size()]; | ||||
|         store(tmp); | ||||
|         b.store(tmp_b); | ||||
|         for (int64_t i = 0; i < size(); i++) { | ||||
|           tmp[i] = std::pow(tmp[i], tmp_b[i]); | ||||
|         } | ||||
|         return loadu(tmp); | ||||
|       })} // Comparison using the _CMP_**_OQ predicate. | ||||
|           //   `O`: get false if an operand is NaN | ||||
|           //   `Q`: do not raise if an operand is NaN | ||||
|   Vectorized<double> tan() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_tand2_u10(values)), map(std::tan)); | ||||
|   } | ||||
|   Vectorized<double> tanh() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_tanhd2_u10(values)), map(std::tanh)); | ||||
|   } | ||||
|   Vectorized<double> lgamma() const { | ||||
|     return USE_SLEEF( | ||||
|         Vectorized<double>(Sleef_lgammad2_u10(values)), map(std::lgamma)); | ||||
|   } | ||||
|   Vectorized<double> erfinv() const { | ||||
|     return map(calc_erfinv); | ||||
|   } | ||||
|   Vectorized<double> exp_u20() const { | ||||
|     return exp(); | ||||
|   } | ||||
|   Vectorized<double> fexp_u20() const { | ||||
|     return exp(); | ||||
|   } | ||||
|   Vectorized<double> i0e() const { | ||||
|     return map(calc_i0e); | ||||
|   } | ||||
|   Vectorized<double> digamma() const { | ||||
|     return map(calc_digamma); | ||||
|   } | ||||
|   Vectorized<double> igamma(const Vectorized<double>& x) const { | ||||
|     __at_align__ double tmp[size()]; | ||||
|     __at_align__ double tmp_x[size()]; | ||||
|     store(tmp); | ||||
|     x.store(tmp_x); | ||||
|     for (int64_t i = 0; i < size(); i++) { | ||||
|       tmp[i] = calc_igamma(tmp[i], tmp_x[i]); | ||||
|     } | ||||
|     return loadu(tmp); | ||||
|   } | ||||
|   Vectorized<double> igammac(const Vectorized<double>& x) const { | ||||
|     __at_align__ double tmp[size()]; | ||||
|     __at_align__ double tmp_x[size()]; | ||||
|     store(tmp); | ||||
|     x.store(tmp_x); | ||||
|     for (int64_t i = 0; i < size(); i++) { | ||||
|       tmp[i] = calc_igammac(tmp[i], tmp_x[i]); | ||||
|     } | ||||
|     return loadu(tmp); | ||||
|   } | ||||
|   Vectorized<double> ceil() const { | ||||
|     return vrndpq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> floor() const { | ||||
|     return vrndmq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> neg() const { | ||||
|     return vnegq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> round() const { | ||||
|     return vrndiq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> trunc() const { | ||||
|     return vrndq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> sqrt() const { | ||||
|     return vsqrtq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> reciprocal() const { | ||||
|     return vdivq_f64(vdupq_n_f64(1.0), values); | ||||
|   } | ||||
|   Vectorized<double> rsqrt() const { | ||||
|     return vdivq_f64(vdupq_n_f64(1.0), vsqrtq_f64(values)); | ||||
|   } | ||||
|   double reduce_add() const { | ||||
|     return vaddvq_f64(values); | ||||
|   } | ||||
|   double reduce_max() const { | ||||
|     return vmaxvq_f64(values); | ||||
|   } | ||||
|   Vectorized<double> operator==(const Vectorized<double>& other) const { | ||||
|     return Vectorized<double>( | ||||
|         vreinterpretq_f64_u64(vceqq_f64(values, other.values))); | ||||
|   } | ||||
|  | ||||
|   Vectorized<double> operator!=(const Vectorized<double>& other) const { | ||||
|     float64x2_t r0 = vreinterpretq_f64_u32( | ||||
|         vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(values, other.values)))); | ||||
|     return Vectorized<double>(r0); | ||||
|   } | ||||
|  | ||||
|   Vectorized<double> operator<(const Vectorized<double>& other) const { | ||||
|     return Vectorized<double>( | ||||
|         vreinterpretq_f64_u64(vcltq_f64(values, other.values))); | ||||
|   } | ||||
|  | ||||
|   Vectorized<double> operator<=(const Vectorized<double>& other) const { | ||||
|     return Vectorized<double>( | ||||
|         vreinterpretq_f64_u64(vcleq_f64(values, other.values))); | ||||
|   } | ||||
|  | ||||
|   Vectorized<double> operator>(const Vectorized<double>& other) const { | ||||
|     return Vectorized<double>( | ||||
|         vreinterpretq_f64_u64(vcgtq_f64(values, other.values))); | ||||
|   } | ||||
|  | ||||
|   Vectorized<double> operator>=(const Vectorized<double>& other) const { | ||||
|     return Vectorized<double>( | ||||
|         vreinterpretq_f64_u64(vcgeq_f64(values, other.values))); | ||||
|   } | ||||
|  | ||||
|   Vectorized<double> eq(const Vectorized<double>& other) const; | ||||
|   Vectorized<double> ne(const Vectorized<double>& other) const; | ||||
|   Vectorized<double> gt(const Vectorized<double>& other) const; | ||||
|   Vectorized<double> ge(const Vectorized<double>& other) const; | ||||
|   Vectorized<double> lt(const Vectorized<double>& other) const; | ||||
|   Vectorized<double> le(const Vectorized<double>& other) const; | ||||
| }; | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator+( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vaddq_f64(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator-( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vsubq_f64(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator*( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vmulq_f64(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator/( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vdivq_f64(a, b); | ||||
| } | ||||
|  | ||||
| // frac. Implement this here so we can use subtraction | ||||
| Vectorized<double> inline Vectorized<double>::frac() const { | ||||
|   return *this - this->trunc(); | ||||
| } | ||||
|  | ||||
| // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if | ||||
| // either input is a NaN. | ||||
| template <> | ||||
| Vectorized<double> inline maximum( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vmaxq_f64(a, b); | ||||
| } | ||||
|  | ||||
| // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if | ||||
| // either input is a NaN. | ||||
| template <> | ||||
| Vectorized<double> inline minimum( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vminq_f64(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline clamp( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& min, | ||||
|     const Vectorized<double>& max) { | ||||
|   return vminq_f64(max, vmaxq_f64(min, a)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline clamp_max( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& max) { | ||||
|   return vminq_f64(max, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline clamp_min( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& min) { | ||||
|   return vmaxq_f64(min, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator&( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vreinterpretq_f64_u64( | ||||
|       vandq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator|( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vreinterpretq_f64_u64( | ||||
|       vorrq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline operator^( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b) { | ||||
|   return vreinterpretq_f64_u64( | ||||
|       veorq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); | ||||
| } | ||||
|  | ||||
| inline Vectorized<double> Vectorized<double>::eq( | ||||
|     const Vectorized<double>& other) const { | ||||
|   return (*this == other) & Vectorized<double>(1.0); | ||||
| } | ||||
|  | ||||
| inline Vectorized<double> Vectorized<double>::ne( | ||||
|     const Vectorized<double>& other) const { | ||||
|   return (*this != other) & Vectorized<double>(1.0); | ||||
| } | ||||
|  | ||||
| inline Vectorized<double> Vectorized<double>::gt( | ||||
|     const Vectorized<double>& other) const { | ||||
|   return (*this > other) & Vectorized<double>(1.0); | ||||
| } | ||||
|  | ||||
| inline Vectorized<double> Vectorized<double>::ge( | ||||
|     const Vectorized<double>& other) const { | ||||
|   return (*this >= other) & Vectorized<double>(1.0); | ||||
| } | ||||
|  | ||||
| inline Vectorized<double> Vectorized<double>::lt( | ||||
|     const Vectorized<double>& other) const { | ||||
|   return (*this < other) & Vectorized<double>(1.0); | ||||
| } | ||||
|  | ||||
| inline Vectorized<double> Vectorized<double>::le( | ||||
|     const Vectorized<double>& other) const { | ||||
|   return (*this <= other) & Vectorized<double>(1.0); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline fmadd( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b, | ||||
|     const Vectorized<double>& c) { | ||||
|   return vfmaq_f64(c, a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline fnmadd( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b, | ||||
|     const Vectorized<double>& c) { | ||||
|   return vfmsq_f64(c, a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline fmsub( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b, | ||||
|     const Vectorized<double>& c) { | ||||
|   return vfmaq_f64(vnegq_f64(c), a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<double> inline fnmsub( | ||||
|     const Vectorized<double>& a, | ||||
|     const Vectorized<double>& b, | ||||
|     const Vectorized<double>& c) { | ||||
|   return vfmsq_f64(vnegq_f64(c), a, b); | ||||
| } | ||||
|  | ||||
| } // namespace CPU_CAPABILITY | ||||
| } // namespace at::vec | ||||
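A minimal usage sketch of the new Vectorized<double> specialization (illustrative only; the function name, array names, and the umbrella include are assumptions, not part of the diff):

#include <ATen/cpu/vec/vec.h>  // assumed umbrella header that pulls in this file

// Computes out[i] = 2.0 * x[i] + y[i] for the two lanes, then overwrites lane 0 with y[0].
void fma_and_blend(const double* x, const double* y, double* out) {
  using at::vec::Vectorized;
  auto vx = Vectorized<double>::loadu(x);                    // loads size() == 2 doubles
  auto vy = Vectorized<double>::loadu(y);
  auto r = at::vec::fmadd(vx, Vectorized<double>(2.0), vy);  // vfmaq_f64: a * b + c per lane
  r = Vectorized<double>::blend<0b01>(r, vy);                // mask bit 0 set: take lane 0 from vy
  r.store(out);
}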
| @ -307,11 +307,49 @@ class Vectorized<float> { | ||||
|   DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(exp) | ||||
|   DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(exp2) | ||||
|   DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(expm1) | ||||
|   // Implementation copied from Arm Optimized Routine | ||||
|   // https://github.com/ARM-software/optimized-routines/blob/master/math/aarch64/advsimd/expf.c | ||||
|   Vectorized<float> exp_u20() const { | ||||
|     return exp(); | ||||
|     // bail out to sleef if it's a special case: | ||||
|     // i.e. there's an input s.t. |input| > 87.3.... | ||||
|     const float32x4_t special_bound = vdupq_n_f32(0x1.5d5e2ap+6f); | ||||
|     uint32x4_t cmp = vcagtq_f32(values, special_bound); | ||||
|     if (vpaddd_u64(vreinterpretq_u64_u32(cmp)) != 0) { | ||||
|       return exp(); | ||||
|     } | ||||
|  | ||||
|     const float32x4_t inv_ln2 = vdupq_n_f32(0x1.715476p+0f); | ||||
|     const float ln2_hi = 0x1.62e4p-1f; | ||||
|     const float ln2_lo = 0x1.7f7d1cp-20f; | ||||
|     const float c0 = 0x1.0e4020p-7f; | ||||
|     const float c2 = 0x1.555e66p-3f; | ||||
|     const float32x4_t ln2_c02 = {ln2_hi, ln2_lo, c0, c2}; | ||||
|  | ||||
|     const uint32x4_t exponent_bias = vdupq_n_u32(0x3f800000); | ||||
|     const float32x4_t c1 = vdupq_n_f32(0x1.573e2ep-5f); | ||||
|     const float32x4_t c3 = vdupq_n_f32(0x1.fffdb6p-2f); | ||||
|     const float32x4_t c4 = vdupq_n_f32(0x1.ffffecp-1f); | ||||
|  | ||||
|     /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)] | ||||
|       x = ln2*n + r, with r in [-ln2/2, ln2/2].  */ | ||||
|  | ||||
|     float32x4_t n = vrndaq_f32(vmulq_f32(values, inv_ln2)); | ||||
|     float32x4_t r = vfmsq_laneq_f32(values, n, ln2_c02, 0); | ||||
|     r = vfmsq_laneq_f32(r, n, ln2_c02, 1); | ||||
|     uint32x4_t e = vshlq_n_u32(vreinterpretq_u32_s32(vcvtq_s32_f32(n)), 23); | ||||
|     float32x4_t scale = vreinterpretq_f32_u32(vaddq_u32(e, exponent_bias)); | ||||
|  | ||||
|     float32x4_t r2 = vmulq_f32(r, r); | ||||
|     float32x4_t p = vfmaq_laneq_f32(c1, r, ln2_c02, 2); | ||||
|     float32x4_t q = vfmaq_laneq_f32(c3, r, ln2_c02, 3); | ||||
|     q = vfmaq_f32(q, p, r2); | ||||
|     p = vmulq_f32(c4, r); | ||||
|     float32x4_t poly = vfmaq_f32(p, q, r2); | ||||
|  | ||||
|     return vfmaq_f32(scale, poly, scale); | ||||
|   } | ||||
|   Vectorized<float> fexp_u20() const { | ||||
|     return exp(); | ||||
|     return exp_u20(); | ||||
|   } | ||||
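A scalar restatement of the range reduction used by exp_u20 above, with the same constants as the NEON path; this is an illustrative sketch, not code from the diff.

#include <cmath>
#include <cstdint>
#include <cstring>

// exp(x) = 2^n * exp(r), where x = n*ln2 + r and |r| <= ln2/2. exp(r) is
// approximated by a short polynomial; 2^n is built by writing n into the
// exponent field of an IEEE-754 single-precision float.
float exp_u20_scalar_reference(float x) {
  float n = std::round(x * 0x1.715476p+0f);              // n = round(x / ln2)
  float r = x - n * 0x1.62e4p-1f - n * 0x1.7f7d1cp-20f;  // r = x - n*ln2 (hi + lo parts)
  uint32_t bits =
      (static_cast<uint32_t>(static_cast<int32_t>(n)) << 23) + 0x3f800000u;
  float scale;                                           // scale = 2^n
  std::memcpy(&scale, &bits, sizeof(scale));
  float r2 = r * r;
  float p = 0x1.0e4020p-7f * r + 0x1.573e2ep-5f;         // c0*r + c1
  float q = 0x1.555e66p-3f * r + 0x1.fffdb6p-2f;         // c2*r + c3
  q = q + p * r2;
  p = 0x1.ffffecp-1f * r;                                // c4*r
  float poly = p + q * r2;
  return scale + poly * scale;                           // 2^n * (1 + poly(r))
}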
|   DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME( | ||||
|       fmod, | ||||
| @ -540,42 +578,6 @@ inline Vectorized<float> Vectorized<float>::le( | ||||
|   return (*this <= other) & Vectorized<float>(1.0f); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline void convert(const float* src, int32_t* dst, int64_t n) { | ||||
|   int64_t i; | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (i = 0; i <= (n - Vectorized<float>::size()); | ||||
|        i += Vectorized<float>::size()) { | ||||
|     vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i))); | ||||
|   } | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (; i < n; i++) { | ||||
|     dst[i] = static_cast<int32_t>(src[i]); | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline void convert(const int32_t* src, float* dst, int64_t n) { | ||||
|   int64_t i; | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (i = 0; i <= (n - Vectorized<float>::size()); | ||||
|        i += Vectorized<float>::size()) { | ||||
|     vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i))); | ||||
|   } | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (; i < n; i++) { | ||||
|     dst[i] = static_cast<float>(src[i]); | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<float> inline fmadd( | ||||
|     const Vectorized<float>& a, | ||||
| @ -632,8 +634,7 @@ inline Vectorized<float> Vectorized<float>::erf() const { | ||||
|   // - exp(- x * x) | ||||
|   auto pow_2 = (*this) * (*this); | ||||
|   auto neg_pow_2 = pow_2 ^ neg_zero_vec; | ||||
|   auto tmp4 = neg_pow_2.map( | ||||
|       std::exp); // This can be swapped for a faster implementation of exp. | ||||
|   auto tmp4 = neg_pow_2.exp(); | ||||
|   auto tmp5 = tmp4 ^ neg_zero_vec; | ||||
|   // erf(x) = sign(x) * (1 - r * t * exp(- x * x)) | ||||
|   auto tmp6 = t * tmp5; | ||||
|  | ||||
| @ -234,7 +234,7 @@ class Vectorized<c10::Half> : public Vectorized16< | ||||
|         vshlq_u16(vandq_u16(is_zero_vec, vdupq_n_u16(1)), shift); | ||||
|     return vaddvq_u16(bits_vec); | ||||
| #else // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC | ||||
|     // use known working implmentation. | ||||
|     // use known working implementation. | ||||
|     __at_align__ value_type tmp[size()]; | ||||
|     store(tmp); | ||||
|     int mask = 0; | ||||
| @ -569,46 +569,6 @@ inline Vectorized<c10::Half> Vectorized<c10::Half>::le( | ||||
|   return (*this <= other) & Vectorized<c10::Half>(1); | ||||
| } | ||||
|  | ||||
| // These are global functions, so the defaults in vec_base.h should | ||||
| // work fine if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is not available. | ||||
| #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC | ||||
| template <> | ||||
| inline void convert(const float16_t* src, int16_t* dst, int64_t n) { | ||||
|   int64_t i; | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (i = 0; i <= (n - Vectorized<c10::Half>::size()); | ||||
|        i += Vectorized<c10::Half>::size()) { | ||||
|     vst1q_s16(dst + i, vcvtq_s16_f16(vld1q_f16(src + i))); | ||||
|   } | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (; i < n; i++) { | ||||
|     dst[i] = static_cast<int16_t>(src[i]); | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline void convert(const int16_t* src, float16_t* dst, int64_t n) { | ||||
|   int64_t i; | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (i = 0; i <= (n - Vectorized<c10::Half>::size()); | ||||
|        i += Vectorized<c10::Half>::size()) { | ||||
|     vst1q_f16(dst + i, vcvtq_f16_s16(vld1q_s16(src + i))); | ||||
|   } | ||||
| #ifndef __msvc_cl__ | ||||
| #pragma unroll | ||||
| #endif | ||||
|   for (; i < n; i++) { | ||||
|     dst[i] = static_cast<float16_t>(src[i]); | ||||
|   } | ||||
| } | ||||
| #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC | ||||
|  | ||||
| template <> | ||||
| Vectorized<c10::Half> inline fmadd( | ||||
|     const Vectorized<c10::Half>& a, | ||||
|  | ||||
| @ -1740,7 +1740,7 @@ Vectorized<int16_t> inline shift_256_16( | ||||
|  | ||||
|   // Control masks for shuffle operation, treating 256 bits as an | ||||
|   // array of 16-bit elements, and considering pairs of neighboring | ||||
|   // elements.  Specifially, a mask named "ctl_M_N" (M,N in [0,1], and | ||||
|   // elements.  Specifically, a mask named "ctl_M_N" (M,N in [0,1], and | ||||
|   // M!=N) is set so that shuffle will move element with index M from | ||||
|   // input pair into element with index N in output pair, and element | ||||
|   // with index M in output pair will be set to all 0s. | ||||
| @ -1875,7 +1875,7 @@ Vectorized<T> inline shift_256_8( | ||||
|  | ||||
|   // Control masks for shuffle operation, treating 256 bits as an | ||||
|   // array of 8-bit elements, and considering quadruples of | ||||
|   // neighboring elements.  Specifially, a mask named "ctl_M_N" (M,N | ||||
|   // neighboring elements.  Specifically, a mask named "ctl_M_N" (M,N | ||||
|   // in [0,1,2,3], and M!=N) is set so that shuffle will move element | ||||
|   // with index M from input quadruple into element with index N in | ||||
|   // output quadruple, and other elements in output quadruple will be | ||||
|  | ||||
| @ -143,7 +143,7 @@ class Vectorized<double> { | ||||
|       const Vectorized<double>& a, | ||||
|       const Vectorized<double>& b, | ||||
|       const Vectorized<double>& mask) { | ||||
|     // the mask used here returned by comparision of vec256 | ||||
|     // the mask used here returned by comparison of vec256 | ||||
|  | ||||
|     return { | ||||
|         vec_sel(a._vec0, b._vec0, mask._vecb0), | ||||
|  | ||||
| @ -142,7 +142,7 @@ class Vectorized<float> { | ||||
|       const Vectorized<float>& a, | ||||
|       const Vectorized<float>& b, | ||||
|       const Vectorized<float>& mask) { | ||||
|     // the mask used here returned by comparision of vec256 | ||||
|     // the mask used here returned by comparison of vec256 | ||||
|     // assuming this we can use the same mask directly with vec_sel | ||||
|     return { | ||||
|         vec_sel(a._vec0, b._vec0, mask._vecb0), | ||||
|  | ||||
| @ -202,7 +202,7 @@ class Vectorized<int16_t> { | ||||
|       const Vectorized<int16_t>& a, | ||||
|       const Vectorized<int16_t>& b, | ||||
|       const Vectorized<int16_t>& mask) { | ||||
|     // the mask used here returned by comparision of vec256 | ||||
|     // the mask used here returned by comparison of vec256 | ||||
|     // assuming this we can use the same mask directly with vec_sel | ||||
|     // warning intel style mask will not work properly | ||||
|     return { | ||||
|  | ||||
| @ -155,7 +155,7 @@ class Vectorized<int32_t> { | ||||
|       const Vectorized<int32_t>& a, | ||||
|       const Vectorized<int32_t>& b, | ||||
|       const Vectorized<int32_t>& mask) { | ||||
|     // the mask used here returned by comparision of vec256 | ||||
|     // the mask used here returned by comparison of vec256 | ||||
|     // assuming this we can use the same mask directly with vec_sel | ||||
|     // warning intel style mask will not work properly | ||||
|     return { | ||||
|  | ||||
| @ -119,7 +119,7 @@ class Vectorized<int64_t> { | ||||
|       const Vectorized<int64_t>& a, | ||||
|       const Vectorized<int64_t>& b, | ||||
|       const Vectorized<int64_t>& mask) { | ||||
|     // the mask used here returned by comparision of vec256 | ||||
|     // the mask used here returned by comparison of vec256 | ||||
|  | ||||
|     return { | ||||
|         vec_sel(a._vec0, b._vec0, mask._vecb0), | ||||
|  | ||||
| @ -397,7 +397,7 @@ inline Vectorized<bool> operator&&( | ||||
|   const __m512i* other_ = reinterpret_cast<const __m512i*>(other.as_bytes()); | ||||
|   __m512i out = _mm512_and_si512(*self_, *other_); | ||||
|   Vectorized<bool> ret; | ||||
|   // We do not have a constructer that takes __m512i, so we need to memcpy | ||||
|   // We do not have a constructor that takes __m512i, so we need to memcpy | ||||
|   std::memcpy(ret, &out, ret.size() * sizeof(bool)); | ||||
|   return ret; | ||||
| } | ||||
|  | ||||
| @ -1852,7 +1852,7 @@ Vectorized<T> inline shift_512_8( | ||||
|  | ||||
|   // Control masks for shuffle operation, treating 512 bits as an | ||||
|   // array of 8-bit elements, and considering pairs of neighboring | ||||
|   // elements.  Specifially, a mask named "ctl_M_N" (M,N in [0,1], and | ||||
|   // elements.  Specifically, a mask named "ctl_M_N" (M,N in [0,1], and | ||||
|   // M!=N) is set so that shuffle will move element with index M from | ||||
|   // input pair into element with index N in output pair, and element | ||||
|   // with index M in output pair will be set to all 0s. | ||||
|  | ||||
| @ -634,7 +634,7 @@ struct Vectorized { | ||||
|   } | ||||
|   Vectorized<T> neg() const { | ||||
|     // NB: the trailing return type is needed because we need to coerce the | ||||
|     // return value back to T in the case of unary operator- incuring a | ||||
|     // return value back to T in the case of unary operator- incurring a | ||||
|     // promotion | ||||
|     return map([](T x) -> T { return -x; }); | ||||
|   } | ||||
|  | ||||
| @ -1958,7 +1958,7 @@ void scaled_gemm( | ||||
|     ScalarType result_dtype, | ||||
|     bool use_fast_accum, | ||||
|     const std::optional<Tensor>& alpha) { | ||||
|   // Note: see `cublasCommonArgs` for various non-intuitive manupulations | ||||
|   // Note: see `cublasCommonArgs` for various non-intuitive manipulations | ||||
|   // of input arguments to this function. | ||||
|   const auto computeType = CUBLAS_COMPUTE_32F; | ||||
|   const auto scaleType = CUDA_R_32F; | ||||
|  | ||||
| @ -2,10 +2,10 @@ | ||||
|  | ||||
| #include <ATen/cuda/ATenCUDAGeneral.h> | ||||
| #include <ATen/cuda/CUDAContext.h> | ||||
| #include <c10/core/impl/GPUTrace.h> | ||||
| #include <c10/cuda/CUDAStream.h> | ||||
| #include <c10/cuda/CUDAGuard.h> | ||||
| #include <ATen/cuda/Exceptions.h> | ||||
| #include <c10/core/impl/GPUTrace.h> | ||||
| #include <c10/cuda/CUDAGuard.h> | ||||
| #include <c10/cuda/CUDAStream.h> | ||||
| #include <c10/util/Exception.h> | ||||
|  | ||||
| #include <cuda_runtime_api.h> | ||||
| @ -246,4 +246,79 @@ private: | ||||
|   } | ||||
| }; | ||||
|  | ||||
| // EventPool - Thread-safe pool of CUDA events that avoids repeated calls to | ||||
| // cudaEventCreate, which can be very expensive when invoked concurrently from | ||||
| // multiple threads (especially on certain device/driver combinations). | ||||
| using CUDAEventPtr = | ||||
|     std::unique_ptr<CUDAEvent, std::function<void(CUDAEvent*)>>; | ||||
|  | ||||
| class EventPool { | ||||
|  public: | ||||
|   EventPool() : pools_(at::cuda::device_count()) {} | ||||
|  | ||||
|   CUDAEventPtr get(const DeviceIndex device) { | ||||
|     // If the device index is invalid, return a non-pooled event with a plain deleter | ||||
|     if (device < 0 || device >= (DeviceIndex)pools_.size()) { | ||||
|       auto deleter = [](CUDAEvent* event) { | ||||
|         delete event; | ||||
|       }; | ||||
|       return CUDAEventPtr( | ||||
|         std::make_unique<CUDAEvent>(cudaEventDisableTiming).release(), deleter); | ||||
|     } | ||||
|  | ||||
|     auto& pool = pools_[device]; | ||||
|  | ||||
|     // Create a destructor that returns the event to the appropriate device pool | ||||
|     auto destructor = [&pool](CUDAEvent* event) noexcept { | ||||
|       if (event != nullptr) { | ||||
|         std::lock_guard<std::mutex> lock(pool.mutex_); | ||||
|         pool.event_pool_.emplace_back(event); | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     { | ||||
|       std::lock_guard<std::mutex> lock(pool.mutex_); | ||||
|       if (!pool.event_pool_.empty()) { | ||||
|         auto event = std::move(pool.event_pool_.back()); | ||||
|         pool.event_pool_.pop_back(); | ||||
|         return CUDAEventPtr(event.release(), destructor); | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     return CUDAEventPtr( | ||||
|         std::make_unique<CUDAEvent>(cudaEventDisableTiming).release(), | ||||
|         destructor); | ||||
|   } | ||||
|  | ||||
|   void empty_cache() { | ||||
|     for (auto& pool : pools_) { | ||||
|       std::lock_guard<std::mutex> lock(pool.mutex_); | ||||
|       pool.event_pool_.clear(); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   void init_num_events(const size_t num_events) { | ||||
|     for (DeviceIndex device_idx = 0; device_idx < at::cuda::device_count(); ++device_idx) { | ||||
|         CUDAGuard device_guard(device_idx); | ||||
|         std::vector<CUDAEventPtr> temp_events; | ||||
|         temp_events.reserve(num_events); | ||||
|         for (size_t i = 0; i < num_events; ++i) { | ||||
|           auto event = get(device_idx); | ||||
|           // Record the event to ensure it's properly initialized | ||||
|           event->record(); | ||||
|           temp_events.emplace_back(std::move(event)); | ||||
|         } | ||||
|         // Events will be returned to pool when temp_events is destroyed | ||||
|     } | ||||
|   } | ||||
|  | ||||
|  private: | ||||
|   struct alignas(64) PerDevicePool { | ||||
|     alignas(64) std::mutex mutex_; | ||||
|     std::vector<std::unique_ptr<CUDAEvent>> event_pool_; | ||||
|   }; | ||||
|  | ||||
|   std::vector<PerDevicePool> pools_; | ||||
| }; | ||||
|  | ||||
| } // namespace at::cuda | ||||
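A short usage sketch for the EventPool above (illustrative; the pool instance and the stream plumbing are assumptions, not part of the diff). The point is that get() returns a unique_ptr whose custom deleter recycles the event into the per-device free list instead of destroying it.

at::cuda::EventPool pool;  // typically a long-lived (e.g. static) instance

void sync_streams(c10::cuda::CUDAStream producer, c10::cuda::CUDAStream consumer) {
  at::cuda::CUDAEventPtr ev = pool.get(producer.device_index());
  ev->record(producer);  // mark the point the consumer must wait for
  ev->block(consumer);   // consumer stream waits on the recorded event
}  // ev goes out of scope here and is returned to the pool, not deleted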
|  | ||||
| @ -168,11 +168,9 @@ void CUDAGraph::instantiate() { | ||||
|   // https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__GRAPH.html#group__CUDART__GRAPH_1g1accfe1da0c605a577c22d9751a09597 | ||||
|   // cudaGraphInstantiateWithFlags | ||||
|   // https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__GRAPH.html#group__CUDART__GRAPH_1ga2c652a24ba93e52b99a47bec0888233 | ||||
| #if !defined(USE_ROCM) || ROCM_VERSION >= 60200 | ||||
|   int version = 0; | ||||
|   AT_CUDA_CHECK(cudaDriverGetVersion(&version)); | ||||
|   if (version < 11040) { | ||||
| #endif | ||||
|     // Trailing NULL, NULL, 0 arguments were recommended by Cuda driver people, | ||||
|     // who prefer not to report error message through these arguments moving forward | ||||
|     // (they prefer return value, or errors on api calls internal to the capture) | ||||
| @ -183,13 +181,11 @@ void CUDAGraph::instantiate() { | ||||
| #endif | ||||
| //Since ROCm 6.2, we want to go down this path as hipGraphExecDestroy in the destructor will not immediately free the memory. | ||||
| //It will wait for the next sync operation. cudaGraphInstantiateFlagAutoFreeOnLaunch will add async frees after graph launch. | ||||
| #if !defined(USE_ROCM) || ROCM_VERSION >= 60200 | ||||
|   } else { | ||||
|     AT_CUDA_CHECK(cudaGraphInstantiateWithFlags(&graph_exec_, | ||||
|                                                 graph_, | ||||
|                                                 cudaGraphInstantiateFlagAutoFreeOnLaunch)); | ||||
|   } | ||||
| #endif | ||||
|   has_graph_exec_ = true; | ||||
| } | ||||
|  | ||||
| @ -311,7 +307,7 @@ CUDAGraph::~CUDAGraph() { | ||||
| // There are recent HIP changes where hipGraphExecDestroy doesn't immediately free memory. | ||||
| // They wait for next sync point in order to free the memory, this is to ensure that all | ||||
| // hipGraphLaunch are finished before we release any memory. This feature was enabled in rocm6.2. | ||||
| // We need to ensure all async opreations finish before deleting the object. | ||||
| // We need to ensure all async operations finish before deleting the object. | ||||
| #if (defined(USE_ROCM) && ROCM_VERSION >= 60200) | ||||
|   if (capture_dev_ != UNDEFINED_DEVICE) // check if capture_dev_ contains the real device id | ||||
|   { | ||||
|  | ||||
							
								
								
									
aten/src/ATen/cuda/CUDAScaledBlas.cpp (new file, 270 lines)
							| @ -0,0 +1,270 @@ | ||||
| #include <cstdint> | ||||
| #include <c10/util/typeid.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <c10/util/SmallVector.h> | ||||
| #include <c10/core/Scalar.h> | ||||
| #include <c10/core/ScalarType.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/core/NamedTensor.h> | ||||
| #include <ATen/Dispatch.h> | ||||
| #include <ATen/ExpandUtils.h> | ||||
| #include <ATen/OpMathType.h> | ||||
| #include <ATen/TensorUtils.h> | ||||
| #include <ATen/cuda/CUDABlas.h> | ||||
| #include <ATen/cuda/tunable/Tunable.h> | ||||
| #include <ATen/cuda/tunable/TunableGemm.h> | ||||
| #include <ATen/native/Resize.h> | ||||
| #include <c10/util/MaybeOwned.h> | ||||
| #include <ATen/native/GroupedMMUtils.h> | ||||
| #include <ATen/native/cuda/RowwiseScaledMM.h> | ||||
| #include <ATen/native/cuda/ScaledGroupMM.h> | ||||
| #include <ATen/native/cuda/GroupMM.h> | ||||
| #include <ATen/ceil_div.h> | ||||
|  | ||||
| #ifdef USE_FBGEMM_GENAI | ||||
| #include <fbgemm_gpu/torch_ops.h> | ||||
| #endif | ||||
|  | ||||
| #ifndef AT_PER_OPERATOR_HEADERS | ||||
| #include <ATen/Functions.h> | ||||
| #include <ATen/NativeFunctions.h> | ||||
| #else | ||||
| #include <ATen/ops/_addmm_activation_native.h> | ||||
| #include <ATen/ops/_efficientzerotensor.h> | ||||
| #include <ATen/ops/_scaled_mm_native.h> | ||||
| #include <ATen/ops/_unsafe_view_native.h> | ||||
| #include <ATen/ops/abs.h> | ||||
| #include <ATen/ops/addmm_native.h> | ||||
| #include <ATen/ops/addmv_native.h> | ||||
| #include <ATen/ops/baddbmm_native.h> | ||||
| #include <ATen/ops/bmm_native.h> | ||||
| #include <ATen/ops/copy_native.h> | ||||
| #include <ATen/ops/dot_native.h> | ||||
| #include <ATen/ops/empty.h> | ||||
| #include <ATen/ops/empty_strided.h> | ||||
| #include <ATen/ops/gelu.h> | ||||
| #include <ATen/ops/max.h> | ||||
| #include <ATen/ops/mm_native.h> | ||||
| #include <ATen/ops/mul.h> | ||||
| #include <ATen/ops/relu.h> | ||||
| #include <ATen/ops/ones.h> | ||||
| #include <ATen/ops/scalar_tensor_native.h> | ||||
| #include <ATen/ops/vdot_native.h> | ||||
| #endif | ||||
|  | ||||
| using at::blas::ScalingType; | ||||
| using at::blas::SwizzleType; | ||||
|  | ||||
| namespace at::cuda::scaled { | ||||
|  | ||||
| /** | ||||
|  * Both inputs must be fp8, | ||||
|  * Each needs a single scale, {Tensorwise (float)} | ||||
|  */ | ||||
| bool check_tensorwise_recipe(c10::ScalarType type_a, | ||||
|                              std::vector<ScalingType>& recipe_a, | ||||
|                              ArrayRef<Tensor>& scales_a, | ||||
|                              c10::ScalarType type_b, | ||||
|                              std::vector<ScalingType>& recipe_b, | ||||
|                              ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp8 | ||||
|   if (!isFloat8Type(type_a) || !isFloat8Type(type_b)) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 1 scale each, {Tensorwise, float} | ||||
|   if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { | ||||
|     return false; | ||||
|   } | ||||
|   // Need {TensorWise, float} for A & B | ||||
|   if (recipe_a[0] != ScalingType::TensorWise) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float) return false; | ||||
|   if (recipe_b[0] != ScalingType::TensorWise) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * Both inputs must be fp8, | ||||
|  * Each needs scales, {Rowwise (float)} | ||||
|  */ | ||||
| bool check_rowwise_recipe(c10::ScalarType type_a, | ||||
|                              std::vector<ScalingType>& recipe_a, | ||||
|                              ArrayRef<Tensor>& scales_a, | ||||
|                              c10::ScalarType type_b, | ||||
|                              std::vector<ScalingType>& recipe_b, | ||||
|                              ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp8 | ||||
|   if (!isFloat8Type(type_a) || !isFloat8Type(type_b)) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 1 scale each, {RowWise, float} | ||||
|   if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // Need {RowWise, fp32} for A & B | ||||
|   if (recipe_a[0] != ScalingType::RowWise) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float) return false; | ||||
|   if (recipe_b[0] != ScalingType::RowWise) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
|  | ||||
| /** | ||||
|  * Two-level scaling, canonical NVFP4 | ||||
|  * Both inputs must be fp4 | ||||
|  * A, B need 2 scales, {Blockwise_1x16 (e4m3), Tensorwise (fp32)} | ||||
|  */ | ||||
| bool check_nvfp4_recipe(c10::ScalarType type_a, | ||||
|                         std::vector<ScalingType>& recipe_a, | ||||
|                         ArrayRef<Tensor>& scales_a, | ||||
|                         c10::ScalarType type_b, | ||||
|                         std::vector<ScalingType>& recipe_b, | ||||
|                         ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp4 | ||||
|   if (type_a != ScalarType::Float4_e2m1fn_x2 || type_b != ScalarType::Float4_e2m1fn_x2) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 2 scales, 2 recipes for each input | ||||
|   if (scales_a.size() != 2 || recipe_a.size() != 2 || scales_b.size() != 2 || recipe_b.size() != 2) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // Need {Blockwise_1x16, e4m3} for scale[0], {TensorWise, fp32} for scale[1] | ||||
|   if (recipe_a[0] != ScalingType::BlockWise1x16 || recipe_a[1] != ScalingType::TensorWise) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float8_e4m3fn || scales_a[1].scalar_type() != ScalarType::Float) return false; | ||||
|   if (recipe_b[0] != ScalingType::BlockWise1x16 || recipe_b[1] != ScalingType::TensorWise) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float8_e4m3fn || scales_b[1].scalar_type() != ScalarType::Float) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * Single-level scaling, what PyT currently understands | ||||
|  * Both inputs must be fp4 | ||||
|  * A, B need 1 scale, {Blockwise_1x16 (e4m3)} | ||||
|  */ | ||||
| bool check_nvfp4_recipe_single_scale | ||||
|                        (c10::ScalarType type_a, | ||||
|                         std::vector<ScalingType>& recipe_a, | ||||
|                         ArrayRef<Tensor>& scales_a, | ||||
|                         c10::ScalarType type_b, | ||||
|                         std::vector<ScalingType>& recipe_b, | ||||
|                         ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp4 | ||||
|   if (type_a != ScalarType::Float4_e2m1fn_x2 || type_b != ScalarType::Float4_e2m1fn_x2) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 1 scale, 1 recipe for each input | ||||
|   if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // Need {Blockwise_1x16, e4m3} for A & B | ||||
|   if (recipe_a[0] != ScalingType::BlockWise1x16) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float8_e4m3fn) return false; | ||||
|   if (recipe_b[0] != ScalingType::BlockWise1x16) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float8_e4m3fn) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * Both inputs must be fp8 | ||||
|  * A, B must have one scale each, e.g. A: {Blockwise_1x128 (float)}, B: {Blockwise_128x128 (float)} | ||||
|  */ | ||||
| bool check_deepseek_recipe(ScalingType expected_recipe_a, | ||||
|                            ScalingType expected_recipe_b, | ||||
|                            c10::ScalarType type_a, | ||||
|                            std::vector<ScalingType>& recipe_a, | ||||
|                            ArrayRef<Tensor>& scales_a, | ||||
|                            c10::ScalarType type_b, | ||||
|                            std::vector<ScalingType>& recipe_b, | ||||
|                            ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp8 | ||||
|   if (type_a != ScalarType::Float8_e4m3fn || type_b != ScalarType::Float8_e4m3fn) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 1 scale, 1 recipe for each input | ||||
|   if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // Need {Blockwise_1x128, float} for A, {Blockwise_128x128, float} for B | ||||
|   if (recipe_a[0] != expected_recipe_a) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float) return false; | ||||
|   if (recipe_b[0] != expected_recipe_b) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * Both inputs must be fp8 | ||||
|  * A, B must have 1 scale each, {Blockwise_1x32, e8m0} | ||||
|  */ | ||||
| bool check_mxfp8_recipe(c10::ScalarType type_a, | ||||
|                         std::vector<ScalingType>& recipe_a, | ||||
|                         ArrayRef<Tensor>& scales_a, | ||||
|                         c10::ScalarType type_b, | ||||
|                         std::vector<ScalingType>& recipe_b, | ||||
|                         ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp8 | ||||
|   if (type_a != ScalarType::Float8_e4m3fn || type_b != ScalarType::Float8_e4m3fn) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 1 scale, 1 recipe for each input | ||||
|   if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // Need {Blockwise_1x32, e8m0} for A & B | ||||
|   if (recipe_a[0] != ScalingType::BlockWise1x32) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float8_e8m0fnu) return false; | ||||
|   if (recipe_b[0] != ScalingType::BlockWise1x32) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float8_e8m0fnu) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * Both inputs must be fp4 | ||||
|  * A, B must have 1 scale each, {Blockwise_1x32, e8m0} | ||||
|  */ | ||||
| bool check_mxfp4_recipe(c10::ScalarType type_a, | ||||
|                         std::vector<ScalingType>& recipe_a, | ||||
|                         ArrayRef<Tensor>& scales_a, | ||||
|                         c10::ScalarType type_b, | ||||
|                         std::vector<ScalingType>& recipe_b, | ||||
|                         ArrayRef<Tensor>& scales_b) { | ||||
|   // both types must be fp4 | ||||
|   if (type_a != ScalarType::Float4_e2m1fn_x2 || type_b != ScalarType::Float4_e2m1fn_x2) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // 1 scale, 1 recipe for each input | ||||
|   if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   // Need {Blockwise_1x32, e8m0} for A & B | ||||
|   if (recipe_a[0] != ScalingType::BlockWise1x32) return false; | ||||
|   if (scales_a[0].scalar_type() != ScalarType::Float8_e8m0fnu) return false; | ||||
|   if (recipe_b[0] != ScalingType::BlockWise1x32) return false; | ||||
|   if (scales_b[0].scalar_type() != ScalarType::Float8_e8m0fnu) return false; | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| } // namespace at::cuda::scaled | ||||
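The check_* predicates above are intended to be tried in sequence by the scaled-GEMM entry point. A hedged sketch of that selection pattern follows; the RecipeKind enum and function name are invented for illustration, while the real dispatch is expected to use the ScaledGemmImplementation enum declared in the header below.

// Illustrative dispatcher; enumerator and function names are hypothetical.
enum class RecipeKind { TensorWise, RowWise, MXFP8, Unsupported };

RecipeKind classify_scaled_gemm(
    c10::ScalarType type_a, std::vector<at::blas::ScalingType>& recipe_a,
    at::ArrayRef<at::Tensor>& scales_a,
    c10::ScalarType type_b, std::vector<at::blas::ScalingType>& recipe_b,
    at::ArrayRef<at::Tensor>& scales_b) {
  using namespace at::cuda::scaled;
  if (check_tensorwise_recipe(type_a, recipe_a, scales_a, type_b, recipe_b, scales_b))
    return RecipeKind::TensorWise;
  if (check_rowwise_recipe(type_a, recipe_a, scales_a, type_b, recipe_b, scales_b))
    return RecipeKind::RowWise;
  if (check_mxfp8_recipe(type_a, recipe_a, scales_a, type_b, recipe_b, scales_b))
    return RecipeKind::MXFP8;
  return RecipeKind::Unsupported;  // caller can raise a descriptive error
}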
							
								
								
									
aten/src/ATen/cuda/CUDAScaledBlas.h (new file, 174 lines)
							| @ -0,0 +1,174 @@ | ||||
| #include <cstdint> | ||||
| #include <c10/util/typeid.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <c10/util/SmallVector.h> | ||||
| #include <c10/core/Scalar.h> | ||||
| #include <c10/core/ScalarType.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/core/NamedTensor.h> | ||||
| #include <ATen/Dispatch.h> | ||||
| #include <ATen/ExpandUtils.h> | ||||
| #include <ATen/OpMathType.h> | ||||
| #include <ATen/TensorUtils.h> | ||||
| #include <ATen/cuda/CUDABlas.h> | ||||
| #include <ATen/cuda/tunable/Tunable.h> | ||||
| #include <ATen/cuda/tunable/TunableGemm.h> | ||||
| #include <ATen/native/Resize.h> | ||||
| #include <c10/util/MaybeOwned.h> | ||||
| #include <ATen/native/GroupedMMUtils.h> | ||||
| #include <ATen/native/cuda/RowwiseScaledMM.h> | ||||
| #include <ATen/native/cuda/ScaledGroupMM.h> | ||||
| #include <ATen/native/cuda/GroupMM.h> | ||||
| #include <ATen/ceil_div.h> | ||||
|  | ||||
| #ifdef USE_FBGEMM_GENAI | ||||
| #include <fbgemm_gpu/torch_ops.h> | ||||
| #endif | ||||
|  | ||||
| #ifndef AT_PER_OPERATOR_HEADERS | ||||
| #include <ATen/Functions.h> | ||||
| #include <ATen/NativeFunctions.h> | ||||
| #else | ||||
| #include <ATen/ops/_addmm_activation_native.h> | ||||
| #include <ATen/ops/_efficientzerotensor.h> | ||||
| #include <ATen/ops/_scaled_mm_native.h> | ||||
| #include <ATen/ops/_unsafe_view_native.h> | ||||
| #include <ATen/ops/abs.h> | ||||
| #include <ATen/ops/addmm_native.h> | ||||
| #include <ATen/ops/addmv_native.h> | ||||
| #include <ATen/ops/baddbmm_native.h> | ||||
| #include <ATen/ops/bmm_native.h> | ||||
| #include <ATen/ops/copy_native.h> | ||||
| #include <ATen/ops/dot_native.h> | ||||
| #include <ATen/ops/empty.h> | ||||
| #include <ATen/ops/empty_strided.h> | ||||
| #include <ATen/ops/gelu.h> | ||||
| #include <ATen/ops/max.h> | ||||
| #include <ATen/ops/mm_native.h> | ||||
| #include <ATen/ops/mul.h> | ||||
| #include <ATen/ops/relu.h> | ||||
| #include <ATen/ops/ones.h> | ||||
| #include <ATen/ops/scalar_tensor_native.h> | ||||
| #include <ATen/ops/vdot_native.h> | ||||
| #endif | ||||
|  | ||||
| using at::blas::ScalingType; | ||||
| using at::blas::SwizzleType; | ||||
|  | ||||
| namespace at::cuda::scaled { | ||||
|  | ||||
| static bool _scaled_mm_allowed_device(bool sm90_only=false, bool sm100_only=false) { | ||||
| #ifdef USE_ROCM | ||||
|     static const std::vector<std::string> archs = { | ||||
|         "gfx942", | ||||
| #if ROCM_VERSION >= 60300 | ||||
|         "gfx1200", "gfx1201", | ||||
| #endif | ||||
| #if ROCM_VERSION >= 60500 | ||||
|         "gfx950" | ||||
| #endif | ||||
|     }; | ||||
|     return at::detail::getCUDAHooks().isGPUArch(archs); | ||||
| #else | ||||
|     auto dprops = at::cuda::getCurrentDeviceProperties(); | ||||
|  | ||||
|     if (sm90_only || sm100_only) { | ||||
|       return (sm90_only && dprops->major == 9) || (sm100_only && dprops->major == 10); | ||||
|     } else { | ||||
|       return dprops->major >= 9 || (dprops->major == 8 && dprops->minor == 9); | ||||
|     } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
| static bool _scaled_mm_is_fnuz() { | ||||
|     return at::detail::getCUDAHooks().isGPUArch({"gfx942"}); | ||||
| } | ||||
| #endif | ||||
| /** | ||||
|  * Track concrete implementations available | ||||
|  */ | ||||
| enum class ScaledGemmImplementation { | ||||
|   NONE = 0, | ||||
|   TENSORWISE_TENSORWISE = 1, | ||||
|   ROWWISE_ROWWISE = 2, | ||||
|   BLOCK_128x128_1x128 = 3, | ||||
|   BLOCK_1x128_128x128 = 4, | ||||
|   BLOCK_1x128_1x128 = 5, | ||||
|   MXFP8_MXFP8 = 6, | ||||
|   NVFP4_NVFP4 = 7, | ||||
|   NVFP4_NVFP4_SINGLE_SCALE = 8, | ||||
|   MXFP4_MXFP4 = 9, | ||||
| }; | ||||
|  | ||||
| /** | ||||
|  * Convert passed int (enum) from python back into a | ||||
|  * strictly-typed enum | ||||
|  */ | ||||
| template <class EnumType, class ArrayType> | ||||
| std::vector<EnumType> convert_int_to_enum(ArrayType& v) { | ||||
|   std::vector<EnumType> converted; | ||||
|   converted.reserve(v.size()); | ||||
|  | ||||
|   for (auto vi : v) { | ||||
|     converted.push_back(static_cast<EnumType>(vi)); | ||||
|   } | ||||
|   return converted; | ||||
| } | ||||
|  | ||||
| bool check_tensorwise_recipe(c10::ScalarType, | ||||
|                              std::vector<ScalingType>&, | ||||
|                              ArrayRef<Tensor>&, | ||||
|                              c10::ScalarType, | ||||
|                              std::vector<ScalingType>&, | ||||
|                              ArrayRef<Tensor>&); | ||||
|  | ||||
|  | ||||
| bool check_rowwise_recipe(c10::ScalarType, | ||||
|                              std::vector<ScalingType>&, | ||||
|                              ArrayRef<Tensor>&, | ||||
|                              c10::ScalarType, | ||||
|                              std::vector<ScalingType>&, | ||||
|                              ArrayRef<Tensor>&); | ||||
|  | ||||
| bool check_nvfp4_recipe(c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&, | ||||
|                         c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&); | ||||
|  | ||||
| bool check_nvfp4_recipe_single_scale | ||||
|                        (c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&, | ||||
|                         c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&); | ||||
|  | ||||
| bool check_deepseek_recipe(ScalingType, | ||||
|                            ScalingType, | ||||
|                            c10::ScalarType, | ||||
|                            std::vector<ScalingType>&, | ||||
|                            ArrayRef<Tensor>&, | ||||
|                            c10::ScalarType, | ||||
|                            std::vector<ScalingType>&, | ||||
|                            ArrayRef<Tensor>&); | ||||
|  | ||||
| bool check_mxfp8_recipe(c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&, | ||||
|                         c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&); | ||||
|  | ||||
| bool check_mxfp4_recipe(c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&, | ||||
|                         c10::ScalarType, | ||||
|                         std::vector<ScalingType>&, | ||||
|                         ArrayRef<Tensor>&); | ||||
|  | ||||
| } // namespace at::cuda::scaled | ||||
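To make the declarations above concrete, the grouped-GEMM v2 entry point later in this diff converts the integer recipe/swizzle lists arriving from Python roughly like this (sketch only; example_convert is an illustrative name):

#include <ATen/cuda/CUDAScaledBlas.h>

void example_convert(at::IntArrayRef scale_recipe_a, at::IntArrayRef swizzle_a) {
  // One enum value per scale tensor; Python passes these as plain ints.
  auto recipe_a_enum  = at::cuda::scaled::convert_int_to_enum<at::blas::ScalingType>(scale_recipe_a);
  auto swizzle_a_enum = at::cuda::scaled::convert_int_to_enum<at::blas::SwizzleType>(swizzle_a);
  // The typed values can then be compared against ScalingType::BlockWise1x32,
  // SwizzleType::SWIZZLE_32_4_4, etc. inside the acceptance checks.
  (void)recipe_a_enum;
  (void)swizzle_a_enum;
}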
| @ -137,7 +137,7 @@ struct CUDACachingHostAllocatorImpl | ||||
|   void free_block_slowpath(Block* block) { | ||||
|     auto start = std::chrono::steady_clock::now(); | ||||
|     // Users may change the allocator config at will. torch unit tests do this. | ||||
|     // However, allocations using cudaHostRegister should use corresonding | ||||
|     // However, allocations using cudaHostRegister should use corresponding | ||||
|     // cudaHostUnregister and similarly for cudaHostAlloc / cudaFreeHost. | ||||
|     void* ptr = block->ptr_; | ||||
|     bool use_register = false; | ||||
|  | ||||
| @ -4,7 +4,7 @@ | ||||
| #include <ATen/cuda/CUDAConfig.h> | ||||
|  | ||||
| // NOTE: These templates are intentionally not defined in this header, | ||||
| // which aviods re-compiling them for each translation unit. If you get | ||||
| // which avoids re-compiling them for each translation unit. If you get | ||||
| // a link error, you need to add an explicit instantiation for your | ||||
| // types in cub.cu | ||||
|  | ||||
|  | ||||
| @ -38,7 +38,7 @@ GemmTunableOp_float_NT,nt_25088_4096_64,1219,1.262 | ||||
| GemmTunableOp_float_NT,nt_4096_4096_64,1216,0.033 | ||||
| ``` | ||||
|  | ||||
| Note the "Validator" lines. If you change a library verison, or ROCm version, or PyTorch version, TunableOp will detect | ||||
| Note the "Validator" lines. If you change a library version, or ROCm version, or PyTorch version, TunableOp will detect | ||||
| this and reject the tunings file because the prior tunings are likely affected by other software changes. | ||||
|  | ||||
| The remaining lines are the tuned solutions for each TunableOp encountered during your execution. Each line consists of | ||||
|  | ||||
| @ -235,7 +235,7 @@ class TunableOp { | ||||
|       // numeric check option is controlled by non-static env var, so check it once per tuned operator | ||||
|       bool do_numerics_check = ctx->IsNumericsCheckEnabled(); | ||||
|  | ||||
|       // calcaulte a reference answer for numerical check | ||||
|       // calculate a reference answer for numerical check | ||||
|       if (do_numerics_check) { | ||||
|         reference_params = params->DeepCopy(false); | ||||
|         TORCH_CHECK(ops_[ResultEntry::Default()]->Call(reference_params) == OK); | ||||
|  | ||||
| @ -12,7 +12,7 @@ namespace at { | ||||
|  | ||||
| // AcceleratorHooksInterface is a shared interface provided by all | ||||
| // accelerators to allow generic code. | ||||
| // This inferface is hook-based as it corresponds to all the functions | ||||
| // This interface is hook-based as it corresponds to all the functions | ||||
| // that are going to be called in a generic way from the CPU code. | ||||
|  | ||||
| struct TORCH_API AcceleratorHooksInterface { | ||||
|  | ||||
| @ -38,7 +38,7 @@ struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface { | ||||
|  | ||||
|   Generator getNewGenerator( | ||||
|       [[maybe_unused]] DeviceIndex device_index = -1) const override { | ||||
|     // TODO(FFFrog): Perserved for BC and will be removed in the future. | ||||
|     // TODO(FFFrog): Preserved for BC and will be removed in the future. | ||||
|     if (at::GetGeneratorPrivate().has_value()) | ||||
|       return at::GetGeneratorForPrivateuse1(device_index); | ||||
|  | ||||
|  | ||||
| @ -283,7 +283,7 @@ inline void boxed_existing_bdim_all_batch_rule( | ||||
| // Use when all tensors arguments accept one (normal) batch dim. | ||||
| // This batching rule expands the batch dim on all Tensors, reshapes it into | ||||
| // dim 0, calls the op, and then reshapes the batch dim out of dim 0. | ||||
| // This is not the most efficient thing; if there are alternatives, plese try | ||||
| // This is not the most efficient thing; if there are alternatives, please try | ||||
| // to use them. Use this only as a last resort. | ||||
| #define EXISTING_BDIM_ALL_BOXED(op) \ | ||||
|   m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_existing_bdim_all_batch_rule>()); | ||||
|  | ||||
| @ -384,7 +384,7 @@ fourOutputs solve_ex_batch_rule( | ||||
|  | ||||
|   // NOTE [ solve_ex Batch Rule Contiguity ] | ||||
|   // A determines whether or not linalg_solve takes an optimized path. We need the check on A_ to match the one run on | ||||
|   // A as BatchedTensor since it might have been saved by autograd (specifically by the jvp) and the autograd behvaior | ||||
|   // A as BatchedTensor since it might have been saved by autograd (specifically by the jvp) and the autograd behavior | ||||
|   // differs based on whether or not the optimized path was taken | ||||
|   const auto batched_A_was_contiguous = A_bdim.has_value() ? at::select(A, *A_bdim, 0).is_contiguous() : A.is_contiguous(); | ||||
|   if (batched_A_was_contiguous && !A.is_complex()) { | ||||
|  | ||||
| @ -282,7 +282,7 @@ static std::tuple<Tensor, std::optional<int64_t>> _softmax_backward_batch_rule( | ||||
|  | ||||
|   dim = getPhysicalDim(output_, /*has_batch_dim*/true, dim); | ||||
|  | ||||
|   // Not sure why output_ needs to be marked as .contiguous(). Someting must | ||||
|   // Not sure why output_ needs to be marked as .contiguous(). Something must | ||||
|   // have changed in PyTorch (and output of softmax is probably always contiguous) | ||||
|   return std::make_tuple(at::_softmax_backward_data(grad_output_, output_.contiguous(), dim, input_dtype), 0); | ||||
| } | ||||
|  | ||||
| @ -224,7 +224,7 @@ static Tensor safeStack(TensorList tensors) { | ||||
|   // is possible for the backward function to return an undefined grad for some | ||||
|   // grad_input for each example. In that case, we return an undefined grad. | ||||
|   // | ||||
|   // It is theoretically posssible for *some* of the examples to produce an | ||||
|   // It is theoretically possible for *some* of the examples to produce an | ||||
|   // undefined grad (a kernel could peek at the gradient values and return an | ||||
|   // undefined tensor if it determines the gradient is full of zeros). We | ||||
|   // could handle this by treating the undefined grad as a zero-filled tensor | ||||
|  | ||||
| @ -113,7 +113,7 @@ SymIntArrayRef BatchedTensorImpl::sym_sizes_custom() const { | ||||
|   return sym_sizes_default(); | ||||
| } | ||||
|  | ||||
| // The following are publically exposed as methods of Tensor | ||||
| // The following are publicly exposed as methods of Tensor | ||||
|  | ||||
| IntArrayRef BatchedTensorImpl::strides_custom() const { | ||||
|   return strides_default(); | ||||
|  | ||||
| @ -37,7 +37,7 @@ namespace at::functorch  { | ||||
| // how to perform the transform. | ||||
| // | ||||
| // TODO: we can excise DynamicLayer in favor of Interpreter, | ||||
| // But I am going to leave it for now as a compatiblity shim to avoid | ||||
| // But I am going to leave it for now as a compatibility shim to avoid | ||||
| // needing to refactor a lot of callsites... | ||||
| struct TORCH_API DynamicLayer { | ||||
|   explicit DynamicLayer( | ||||
|  | ||||
| @ -88,7 +88,7 @@ std::ostream& operator<<(std::ostream& os, const TransformType& t); | ||||
| // >>> VmapInterpreterPtr(&interpreter).batchSize() | ||||
| // | ||||
| // Finally, Interpreter::process switches on the type of the interpreter | ||||
| // and calls one of {Transform}Intepreter::processImpl under the hood. | ||||
| // and calls one of {Transform}Interpreter::processImpl under the hood. | ||||
| // Same for Interpreter::sendToNextInterpreter :) | ||||
|  | ||||
| struct VmapInterpreterMeta { | ||||
|  | ||||
| @ -733,7 +733,7 @@ TORCH_LIBRARY_IMPL(_, FuncTorchBatched, m) { | ||||
| } | ||||
|  | ||||
| TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) { | ||||
|   // still legacy b/c teturns multiple tensors | ||||
|   // still legacy b/c returns multiple tensors | ||||
|   m.impl("split.Tensor", split_batching_rule); | ||||
|   m.impl("split_with_sizes", split_with_sizes_batching_rule); | ||||
|   m.impl("split_with_sizes_copy", split_with_sizes_copy_batching_rule); | ||||
|  | ||||
| @ -158,7 +158,7 @@ void MPSStream::fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t | ||||
|       endKernelCoalescing(); | ||||
|       id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder]; | ||||
|  | ||||
|       // For some reason fillBufferfor stopped working for lengh > 4Gb on MacOS 26 | ||||
|       // For some reason fillBufferfor stopped working for length > 4Gb on MacOS 26 | ||||
|       // See https://github.com/pytorch/pytorch/issues/163962 | ||||
|       // Workaround by batching copy commands into 4Gb chunks | ||||
|       constexpr size_t max_copy_size = 0x100000000; // 4GB | ||||
|  | ||||
| @ -148,7 +148,7 @@ inline void checkInputsSolver(const Tensor& A, | ||||
|  | ||||
| inline bool is_row_or_column_contiguous(const Tensor& t) { | ||||
|   // This could be made more general, similar to how it's checked in matmul, which would allow to | ||||
|   // ellide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky. | ||||
|   // elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky. | ||||
|   // We choose to be conservative for simplicity | ||||
|   return t.is_contiguous() || t.transpose(-2, -1).is_contiguous(); | ||||
| } | ||||
|  | ||||
| @ -21,7 +21,7 @@ enum class fft_norm_mode { | ||||
| // NOTE [ Fourier Transform Conjugate Symmetry ] | ||||
| // | ||||
| // Real-to-complex Fourier transform satisfies the conjugate symmetry. That is, | ||||
| // assuming X is the transformed K-dimensionsal signal, we have | ||||
| // assuming X is the transformed K-dimensional signal, we have | ||||
| // | ||||
| //     X[i_1, ..., i_K] = X[j_i, ..., j_K]*, | ||||
| // | ||||
|  | ||||
| @ -128,7 +128,7 @@ at::Tensor PackedLinearWeight::apply_impl( | ||||
|   auto* input_tr_ptr = | ||||
|       reinterpret_cast<uint8_t*>(input_tr.data_ptr<c10::quint8>()); | ||||
|   // TODO: Activation transpose before and after the kernel can be removed if we | ||||
|   // keep activation tensor always tranposed. | ||||
|   // keep activation tensor always transposed. | ||||
|   fbgemm::transpose_simd<uint8_t>( | ||||
|       batch_size, K, input_ptr, K, input_tr_ptr, batch_size); | ||||
|  | ||||
|  | ||||
| @ -520,7 +520,7 @@ cpu_adaptive_avg_pool3d_channels_last( | ||||
|       scalar_t* out = output_data + i * channels; | ||||
|       int64_t size = channels; | ||||
|  | ||||
|       // Note: For oridinary usage scenario, each out lane should | ||||
|       // Note: For ordinary usage scenario, each out lane should | ||||
|       //   fit in L1 cache; otherwise consider block dim C. | ||||
|       // Pass I: zero the out lane | ||||
|       int64_t d1 = 0; | ||||
|  | ||||
| @ -34,7 +34,7 @@ struct Dist { | ||||
|   //     finish :   This tells what to do with the aggregated value to compute | ||||
|   //                the norm. Generally this is the result of val ^ (1 / p). | ||||
|   //     backward : This is the gradient for that norm. Arguments are pretty | ||||
|   //                self explanitory. | ||||
|   //                self explanatory. | ||||
|   // | ||||
|   // There are a few cases where these aren't used. The 0 norm has no backward, | ||||
|   // because it's always 0, so that's shortcircuited earlier. There's a special | ||||
|  | ||||
| @ -30,7 +30,7 @@ vec::Vectorized<scalar_t> is_nan_vec(vec::Vectorized<scalar_t> vec) { | ||||
|   return vec.isnan(); | ||||
| } | ||||
|  | ||||
| // TODO: use is_integeral/is_same to check the scalar_t and simplify the implementation | ||||
| // TODO: use is_integral/is_same to check the scalar_t and simplify the implementation | ||||
| // currently it does not work | ||||
| template <> | ||||
| vec::Vectorized<unsigned char> is_nan_vec<unsigned char>(vec::Vectorized<unsigned char> vec) { | ||||
|  | ||||
| @ -74,7 +74,7 @@ it to sum up the entire array into a single value. | ||||
|  | ||||
| `ReduceOpsKernel.cpp` uses the `CPU_CAPABILITY_*` macros to "know" under which | ||||
| compiler flags it is currently compiled. This allows the programmer to write | ||||
| generic code, which will be compiled under multipled compilation settings. | ||||
| generic code, which will be compiled under multiplied compilation settings. | ||||
|  | ||||
| `../ReduceOps.cpp` now includes the header `ReduceOpsKernel.h`, which contains | ||||
| a generic definition of `sumImplAll`. This function allows the user to reduce | ||||
|  | ||||
| @ -889,7 +889,7 @@ void ImagingResampleHorizontalConvolution8u( | ||||
|             _mm_loadu_si128((__m128i *) (lineIn_min + stride * i))), | ||||
|             _mm_loadu_si128((__m128i *) (lineIn_min + stride * (i + 4))), 1); | ||||
|  | ||||
|         // Extract lower part of each lane, cast to epi16 and reoder RGBARGBA -> RRGGBBAA | ||||
|         // Extract lower part of each lane, cast to epi16 and reorder RGBARGBA -> RRGGBBAA | ||||
|         // RGBA: pix1 = [ | ||||
|         //   r0 0 r1 0  g0 0 g1 0  b0 0 b1 0  a0 0 a1 0 | ||||
|         //   r4 0 r5 0  g4 0 g5 0  b4 0 b5 0  a4 0 a5 0 | ||||
|  | ||||
| @ -240,7 +240,7 @@ _PS256_CONST(coscof_p2,  4.166664568298827E-002); | ||||
| _PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI | ||||
|  | ||||
|  | ||||
| /* evaluation of 8 sines at onces using AVX intrinsics | ||||
| /* evaluation of 8 sines at once using AVX intrinsics | ||||
|  | ||||
|    The code is the exact rewriting of the cephes sinf function. | ||||
|    Precision is excellent as long as x < 8192 (I did not bother to | ||||
|  | ||||
| @ -311,7 +311,7 @@ void GroupNormKernelImplChannelsLastInternal( | ||||
|   const bool gamma_null = (gamma_data == nullptr); | ||||
|   const bool beta_null = beta_data == nullptr; | ||||
|  | ||||
|   // NB: About algorithm choosen: | ||||
|   // NB: About algorithm chosen: | ||||
|   // | ||||
|   // On channels last, GroupNorm has a input shape of {N, H, W, GD}, | ||||
|   // Mean and rstd are collected per each n and g, which involves reduction | ||||
|  | ||||
| @ -930,7 +930,7 @@ void ref_dyn_quant_matmul_4bit_channelwise_kernel( | ||||
|         } | ||||
|       }; | ||||
|  | ||||
|   // Dynamically Quantize the float32 input to 8 bit assymetric | ||||
|   // Dynamically Quantize the float32 input to 8 bit asymmetric | ||||
|   input_quant_pack_8bit_channelwise(m, k, lhs_f32, (int8_t*)lhs_qa8dx); | ||||
|  | ||||
|   const size_t lhs_stride = | ||||
| @ -1163,7 +1163,7 @@ void dyn_quant_matmul_4bit_kernel( | ||||
|   const int64_t weight_packed_size = | ||||
|       kleidiai::kai_pack_rhs_int4_size(N, K, block_size); | ||||
|   if (weight_packed_size == packed_weights.numel()) { | ||||
|     // KleidiAI interface intenally handles the Channelwise and groupwise | ||||
|     // KleidiAI interface internally handles the Channelwise and groupwise | ||||
|     // distinction | ||||
|     kleidiai::kai_quant_pack_lhs_int4_mm( | ||||
|         output, inp, packed_weights, M, N, K, block_size); | ||||
|  | ||||
(File diff suppressed because it is too large)
							| @ -1,11 +1,11 @@ | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/Context.h> | ||||
| #include <ATen/Dispatch.h> | ||||
| #include <ATen/Dispatch_v2.h> | ||||
| #include <ATen/cuda/CachingHostAllocator.h> | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/cuda/CUDAContext.h> | ||||
| #include <ATen/cuda/CUDAEvent.h> | ||||
| #include <ATen/cuda/CachingHostAllocator.h> | ||||
| #include <ATen/cuda/PeerToPeerAccess.h> | ||||
| #include <ATen/native/Copy.h> | ||||
| #include <ATen/native/TensorIterator.h> | ||||
| @ -27,6 +27,24 @@ | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| // Initial pool size for CUDA events per device. | ||||
| constexpr size_t kInitialEventPoolSize = 8; | ||||
|  | ||||
| at::cuda::CUDAEventPtr getEventFromPool(const at::DeviceIndex device_idx) { | ||||
|   static auto* event_pool = []() { | ||||
|     auto* pool = new at::cuda::EventPool(); | ||||
|     // Pre-populate the pool with events to avoid stalls in creating events | ||||
|     pool->init_num_events(kInitialEventPoolSize); | ||||
|     return pool; | ||||
|   }(); | ||||
|  | ||||
|   return event_pool->get(device_idx); | ||||
| } | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| void neg_kernel_cuda(TensorIteratorBase &iter); | ||||
| void conj_kernel_cuda(TensorIteratorBase &iter); | ||||
|  | ||||
| @ -263,12 +281,14 @@ void copy_device_to_device(TensorIterator& iter, | ||||
|     // write-after-read dependencies on the destination side are handled, so | ||||
|     // that no one is operating on the dst memory when we perform the copy. | ||||
|     // src waits on dst barrier (src already waits on src) | ||||
|     CUDAEvent dst_ready; | ||||
|  | ||||
|     // Use event pool for better performance instead of creating new events | ||||
|     auto dst_ready = getEventFromPool(dst_device.index()); | ||||
|     device_guard.set_device(dst_device); | ||||
|     dst_ready.record(getCurrentCUDAStream(dst_device.index())); | ||||
|     dst_ready->record(getCurrentCUDAStream(dst_device.index())); | ||||
|  | ||||
|     device_guard.set_device(src_device); | ||||
|     dst_ready.block(copy_stream); | ||||
|     dst_ready->block(copy_stream); | ||||
|   } | ||||
|  | ||||
|   if (memcpy_eligible) { | ||||
| @ -307,11 +327,11 @@ void copy_device_to_device(TensorIterator& iter, | ||||
|     // operate on dst's copy until the copy is complete. | ||||
|  | ||||
|     // Still on src_device, record stream event | ||||
|     CUDAEvent src_ready; | ||||
|     src_ready.record(copy_stream); | ||||
|     auto src_ready = getEventFromPool(src_device.index()); | ||||
|     src_ready->record(copy_stream); | ||||
|  | ||||
|     device_guard.set_device(dst_device); | ||||
|     src_ready.block(getCurrentCUDAStream(dst_device.index())); | ||||
|     src_ready->block(getCurrentCUDAStream(dst_device.index())); | ||||
|   } | ||||
|  | ||||
|   AT_CUDA_CHECK(cudaGetLastError()); | ||||
|  | ||||
| @ -494,7 +494,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen) | ||||
|       auto value = static_cast<scalar_t>(rand * range + from); | ||||
|       // reverse the bounds of curand4 from (0, 1] to [0, 1) | ||||
|       // Note that this method is from legacy THCTensorRandom and is likely to give | ||||
|       // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and | ||||
|       // you more 0-s, since, the probability of getting 1-s is higher than 0-s and | ||||
|       // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s. | ||||
|       // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706 | ||||
|       auto reverse_bound_value = value == to ? from : value; | ||||
|  | ||||
aten/src/ATen/native/cuda/GroupedBlas.cpp (new file, 574 lines)
							| @ -0,0 +1,574 @@ | ||||
| #include <cstdint> | ||||
| #include <c10/util/typeid.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <c10/util/SmallVector.h> | ||||
| #include <c10/core/Scalar.h> | ||||
| #include <c10/core/ScalarType.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/core/NamedTensor.h> | ||||
| #include <ATen/Dispatch.h> | ||||
| #include <ATen/ExpandUtils.h> | ||||
| #include <ATen/OpMathType.h> | ||||
| #include <ATen/TensorUtils.h> | ||||
| #include <ATen/cuda/CUDABlas.h> | ||||
| #include <ATen/cuda/CUDAScaledBlas.h> | ||||
| #include <ATen/cuda/tunable/Tunable.h> | ||||
| #include <ATen/cuda/tunable/TunableGemm.h> | ||||
| #include <ATen/native/Resize.h> | ||||
| #include <c10/util/MaybeOwned.h> | ||||
| #include <ATen/native/GroupedMMUtils.h> | ||||
| #include <ATen/native/cuda/RowwiseScaledMM.h> | ||||
| #include <ATen/native/cuda/ScaledGroupMM.h> | ||||
| #include <ATen/native/cuda/GroupMM.h> | ||||
| #include <ATen/ceil_div.h> | ||||
|  | ||||
| #ifdef USE_FBGEMM_GENAI | ||||
| #include <fbgemm_gpu/torch_ops.h> | ||||
| #endif | ||||
|  | ||||
| #ifndef AT_PER_OPERATOR_HEADERS | ||||
| #include <ATen/Functions.h> | ||||
| #include <ATen/NativeFunctions.h> | ||||
| #else | ||||
| #include <ATen/ops/_addmm_activation_native.h> | ||||
| #include <ATen/ops/_efficientzerotensor.h> | ||||
| #include <ATen/ops/_scaled_mm_native.h> | ||||
| #include <ATen/ops/_unsafe_view_native.h> | ||||
| #include <ATen/ops/abs.h> | ||||
| #include <ATen/ops/addmm_native.h> | ||||
| #include <ATen/ops/addmv_native.h> | ||||
| #include <ATen/ops/baddbmm_native.h> | ||||
| #include <ATen/ops/bmm_native.h> | ||||
| #include <ATen/ops/copy_native.h> | ||||
| #include <ATen/ops/dot_native.h> | ||||
| #include <ATen/ops/empty.h> | ||||
| #include <ATen/ops/empty_strided.h> | ||||
| #include <ATen/ops/gelu.h> | ||||
| #include <ATen/ops/max.h> | ||||
| #include <ATen/ops/mm_native.h> | ||||
| #include <ATen/ops/mul.h> | ||||
| #include <ATen/ops/relu.h> | ||||
| #include <ATen/ops/ones.h> | ||||
| #include <ATen/ops/scalar_tensor_native.h> | ||||
| #include <ATen/ops/vdot_native.h> | ||||
| #endif | ||||
|  | ||||
| using at::blas::ScalingType; | ||||
| using at::blas::SwizzleType; | ||||
|  | ||||
| namespace scaled_blas = at::cuda::scaled; | ||||
| using scaled_blas::ScaledGemmImplementation; | ||||
| using scaled_blas::convert_int_to_enum; | ||||
| using scaled_blas::_scaled_mm_allowed_device; | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| // 2d-2d and 2d-3d | ||||
| // scaling=MXFP8 | ||||
| // CUDA-only | ||||
| Tensor& | ||||
| _mx8_mx8_bf16_grouped_mm_fbgemm( | ||||
|         const Tensor& mat_a, | ||||
|         const Tensor& mat_b, | ||||
|         const Tensor& scale_a, | ||||
|         const SwizzleType& swizzle_a, | ||||
|         const Tensor& scale_b, | ||||
|         const SwizzleType& swizzle_b, | ||||
|         const std::optional<at::Tensor>& offs, | ||||
|         Tensor& out) { | ||||
|     const bool a_is_2d = mat_a.dim() == 2; | ||||
|     const bool b_is_2d = mat_b.dim() == 2; | ||||
|     bool b_is_3d = mat_b.dim() == 3; | ||||
|     bool is_2d_2d = a_is_2d && b_is_2d; | ||||
|     bool is_2d_3d = a_is_2d && b_is_3d; | ||||
|     TORCH_CHECK_VALUE(is_2d_2d || is_2d_3d, "MXFP8 grouped GEMM currently only supports 2d-2d and 2d-3d cases"); | ||||
|     TORCH_CHECK_VALUE(offs.has_value(), "MXFP8 2d-2d and 2d-3d grouped GEMMs requires offsets"); | ||||
|     TORCH_CHECK_VALUE(out.scalar_type() == at::kBFloat16, "Only bf16 out_dtype is supported for MXFP8 grouped gemm"); | ||||
|     // MXFP8 expects float8_e8m0fnu scales. | ||||
|     TORCH_CHECK_VALUE(scale_a.scalar_type() == at::kFloat8_e8m0fnu && scale_b.scalar_type() == at::kFloat8_e8m0fnu, | ||||
|         "For MXFP8 grouped gemm, both scales must be float8_e8m0fnu tensors."); | ||||
| #ifdef USE_ROCM | ||||
|     TORCH_CHECK_VALUE(swizzle_a == SwizzleType::NO_SWIZZLE && swizzle_b == SwizzleType::NO_SWIZZLE, | ||||
|         "For ROCM MXFP8 grouped gemm, both scale swizzle types must be SWIZZLE_NONE"); | ||||
| #else | ||||
|     TORCH_CHECK_VALUE(swizzle_a == SwizzleType::SWIZZLE_32_4_4 && swizzle_b == SwizzleType::SWIZZLE_32_4_4, | ||||
|         "For CUDA MXFP8 grouped gemm, both scale swizzle types must be SWIZZLE_32_4_4"); | ||||
| #endif | ||||
|  | ||||
| #if defined(USE_FBGEMM_GENAI) and !defined(USE_ROCM) | ||||
|     fbgemm_gpu::mx8mx8bf16_grouped_mm( | ||||
|         mat_a, | ||||
|         mat_b, | ||||
|         scale_a, | ||||
|         scale_b, | ||||
|         offs.value(), | ||||
|         out); | ||||
| #else | ||||
|     TORCH_CHECK_NOT_IMPLEMENTED(false, "mxfp8_mxfp8 grouped gemm requires compile with USE_FBGEMM_GENAI"); | ||||
| #endif | ||||
|     return out; | ||||
| } | ||||
|  | ||||
| // 2d-2d and 2d-3d cases | ||||
| // scaling=rowwise | ||||
| // CUDA-only | ||||
| Tensor& | ||||
| _f8_f8_bf16_rowwise_grouped_mm_cuda( | ||||
|           const Tensor& mat_a, | ||||
|           const Tensor& mat_b, | ||||
|           const Tensor& scale_a, | ||||
|           const Tensor& scale_b, | ||||
|           const std::optional<Tensor>& offs, | ||||
|           const std::optional<Tensor>& bias, | ||||
|           const bool use_fast_accum, | ||||
|           Tensor& out) { | ||||
|   TORCH_CHECK_VALUE(mat_a.dtype() == at::kFloat8_e4m3fn, "Expected mat_a to be Float8_e4m3 matrix got ", mat_a.scalar_type()); | ||||
|   TORCH_CHECK_VALUE(mat_b.dtype() == at::kFloat8_e4m3fn, "Expected mat_b to be Float8_e4m3 matrix got ", mat_b.scalar_type()); | ||||
|  | ||||
|   at::cuda::detail::f8f8bf16_grouped_mm( | ||||
|       mat_a, | ||||
|       mat_b, | ||||
|       scale_a, | ||||
|       scale_b, | ||||
|       offs, | ||||
|       bias, | ||||
|       use_fast_accum, | ||||
|       out); | ||||
|     return out; | ||||
| } | ||||
|  | ||||
| // 2d-2d and 2d-3d cases | ||||
| // scaling=rowwise | ||||
| // only being called for rocm | ||||
| Tensor& | ||||
| _f8_f8_bf16_rowwise_grouped_mm_rocm( | ||||
|       const Tensor& mat_a, | ||||
|       const Tensor& mat_b, | ||||
|       const Tensor& scale_a, | ||||
|       const Tensor& scale_b, | ||||
|       const std::optional<Tensor>& offs, | ||||
|       Tensor& out) { | ||||
|   TORCH_CHECK_VALUE(mat_a.dtype() == at::kFloat8_e4m3fnuz, "Expected mat_a to be Float8_e4m3fnuz matrix got ", mat_a.scalar_type()); | ||||
|   TORCH_CHECK_VALUE(mat_b.dtype() == at::kFloat8_e4m3fnuz, "Expected mat_b to be Float8_e4m3fnuz matrix got ", mat_b.scalar_type()); | ||||
|  | ||||
| #if defined(USE_FBGEMM_GENAI) && defined(USE_ROCM) | ||||
|   fbgemm_gpu::f8f8bf16_rowwise_grouped_mm( | ||||
|       mat_a, | ||||
|       // FBGEMM expects B matrix shape to be (.., N, K) | ||||
|       mat_b.transpose(-2, -1), | ||||
|       scale_a, | ||||
|       scale_b, | ||||
|       offs, | ||||
|       out); | ||||
| #else | ||||
|   TORCH_CHECK_NOT_IMPLEMENTED(false, "grouped gemm is not supported without USE_FBGEMM_GENAI on ROCM") | ||||
| #endif | ||||
|   return out; | ||||
|  | ||||
| } | ||||
|  | ||||
| // Dispatch f8 x f8 -> bf16 row-wise scaled to rocm/cuda | ||||
| Tensor& | ||||
| _f8_f8_bf16_rowwise_grouped_mm( | ||||
|       const Tensor& mat_a, | ||||
|       const Tensor& mat_b, | ||||
|       const Tensor& scale_a, | ||||
|       const Tensor& scale_b, | ||||
|       const std::optional<Tensor>& offs, | ||||
|       const std::optional<Tensor>& bias, | ||||
|       bool use_fast_accum, | ||||
|       Tensor& out) { | ||||
|   // FP8 per-tensor and per-row scaling expect fp32 scales. | ||||
|   TORCH_CHECK_VALUE(scale_a.scalar_type() == kFloat && scale_b.scalar_type() == kFloat, | ||||
|       "For grouped FP8 rowwise, both scales must be float32 tensors"); | ||||
| #ifndef USE_ROCM | ||||
|   return _f8_f8_bf16_rowwise_grouped_mm_cuda( | ||||
|       mat_a, | ||||
|       mat_b, | ||||
|       scale_a, | ||||
|       scale_b, | ||||
|       offs, | ||||
|       bias, | ||||
|       use_fast_accum, | ||||
|       out); | ||||
| #else | ||||
|   // NOTE: ignore use_fast_accum | ||||
|   TORCH_CHECK_VALUE(!bias.has_value(), "ROCM grouped gemm does not support bias") | ||||
|   return _f8_f8_bf16_rowwise_grouped_mm_rocm( | ||||
|       mat_a, | ||||
|       mat_b, | ||||
|       scale_a, | ||||
|       scale_b, | ||||
|       offs, | ||||
|       out); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| void _check_scales_fp8_rowwise(const Tensor& mat, const Tensor& scale, const int dim, const int arg_idx, const int scale_multiplier=1) { | ||||
|   // Checks scales for 2d or 3d target tensors (`mat`). | ||||
|   if (mat.dim() == 2) { | ||||
|     TORCH_CHECK( | ||||
|         scale.dim() == 1, | ||||
|         "scale must be a 1D tensor, but got ", | ||||
|         scale.dim(), | ||||
|         "D, arg ", | ||||
|         arg_idx); | ||||
|     TORCH_CHECK( | ||||
|         scale.is_contiguous(), "scale must be contiguous for arg ", arg_idx); | ||||
|     TORCH_CHECK( | ||||
|         scale.size(0) == mat.size(dim) * scale_multiplier, | ||||
|         "scale must have the same length as mat for arg ", | ||||
|         arg_idx); | ||||
|   } else { | ||||
|     TORCH_CHECK( | ||||
|         scale.dim() == 2, | ||||
|         "scale must be a 2D tensor, but got ", | ||||
|         scale.dim(), | ||||
|         "D for arg ", | ||||
|         arg_idx); | ||||
|     TORCH_CHECK( | ||||
|         scale.stride(1) == 1, | ||||
|         "scale must be contiguous in the last dimension for arg ", | ||||
|         arg_idx); | ||||
|     TORCH_CHECK( | ||||
|         scale.size(0) == mat.size(0), | ||||
|         "scale must have the same batch dimension as mat for arg ", | ||||
|         arg_idx); | ||||
|     TORCH_CHECK( | ||||
|         scale.size(1) == mat.size(1 + dim), | ||||
|         "scale must have the same first dimension as mat for arg ", | ||||
|         arg_idx); | ||||
|   } | ||||
| } | ||||
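Read concretely, the 2d branch above means the following (sizes are made up for illustration):

#include <ATen/ATen.h>

// Sketch: rowwise-scale sizes that satisfy _check_scales_fp8_rowwise.
void example_rowwise_scale_shapes() {
  const int64_t M = 16, total_K = 4096, n_groups = 4;  // n_groups == offs->size(0)
  auto mat_a = at::empty({M, total_K}, at::dtype(at::kFloat8_e4m3fn).device(at::kCUDA));
  // 2d case, dim == 0, scale_multiplier == n_groups:
  // the scale must be a contiguous 1d float32 tensor of length M * n_groups.
  auto scale_a = at::empty({M * n_groups}, at::dtype(at::kFloat).device(at::kCUDA));  // 64 values
  // 3d case: a (G, ...) tensor checked along `dim` instead needs a 2d scale of
  // shape (mat.size(0), mat.size(1 + dim)), contiguous in its last dimension.
  (void)mat_a; (void)scale_a;
}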
|  | ||||
| void _check_scales_mxfp8(const Tensor& mat, const Tensor& scale, const int dim, const int arg_idx) { | ||||
|   // Checks scales for 2d or 3d target tensors (`mat`). | ||||
|   if (mat.dim() == 2) { | ||||
|     // For MXFP8, 2d tensors have variable size groups represented as subtensors, | ||||
|     // that are converted to blocked padded format individually, | ||||
|     // so we can't check the scale sizes without doing a d2h sync to get the group sizes here. | ||||
|     TORCH_CHECK( | ||||
|       scale.dim() == mat.dim(), | ||||
|       "for mxfp8, scale must have same number of dimensions as parent tensor, but got mat.dim() = ", mat.dim(), " and scale.dim() = ", scale.dim(), " for arg ", arg_idx); | ||||
|  | ||||
|     // LHS mat shape (M, total_K) -> scale shape (rounded_up(M, 128), rounded_up_per_group(K/32, 4)) | ||||
|     // RHS mat shape (total_K, N) -> scale shape (rounded_up(N, 128), rounded_up_per_group(K/32, 4)) | ||||
|     //   * weight is transposed prior to the call, scale stays non-transposed. | ||||
|     bool LHS = arg_idx == 0; | ||||
|     int scale_dim_to_check = 0; | ||||
|     int mat_dim_to_check = LHS ? 0 : 1; | ||||
|     TORCH_CHECK( | ||||
|         scale.size(scale_dim_to_check) >= mat.size(mat_dim_to_check), | ||||
|         "for mxfp8, arg ", arg_idx, " tensor shape (", mat.size(0), ", ", mat.size(1), ") ", | ||||
|         "must have scale.shape[", scale_dim_to_check, "] >= ", mat.size(mat_dim_to_check), " but got scale.shape=(", scale.size(0), ", ", scale.size(1), ")"); | ||||
|   } else { | ||||
|     // For MXFP8, 3d tensors have static group sizes (stack of 2d tensors), | ||||
|     // so we can check the exact expected scale sizes here without a d2h sync. | ||||
|     auto round_up = [](auto x, auto y) { | ||||
|         return ((x + y - 1) / y) * y; | ||||
|     }; | ||||
|  | ||||
|     // TODO: this is for 3d tensor in 2d-3d case specifically. | ||||
|     // We'll need to support 3d-3d and 3d-2d cases once mxfp8 grouped gemm supports them. | ||||
|     int64_t G = mat.size(0); | ||||
|     int64_t K = mat.size(1); | ||||
|     int64_t N = mat.size(2); | ||||
|     int64_t blocked_scale_K = round_up(K/32, 4); | ||||
|     int64_t blocked_scale_N = round_up(N, 128); | ||||
|  | ||||
|     // fbgemm expects stack of flattened blocked scales for 3d tensor, shape (G, blocked_scale_K * blocked_scale_N). | ||||
|     TORCH_CHECK( | ||||
|       scale.dim() == mat.dim() - 1, | ||||
|       "for mxfp8 2d-3d grouped GEMM, the 3d tensor of shape (G,K,N) must have a 2d scale of shape (G, blocked_scale_K * blocked_scale_N), but scale is ", scale.dim(), "D for arg ", arg_idx | ||||
|     ); | ||||
|     TORCH_CHECK( | ||||
|       scale.size(0) == G && scale.size(1) == blocked_scale_K * blocked_scale_N, | ||||
|       "for mxfp8, the tensor shape (", G, ", ", K, ", ", N, ") must have scale shape (", G, ",", blocked_scale_K, ",", blocked_scale_N, ") for arg ", arg_idx | ||||
|     ); | ||||
|   } | ||||
| } | ||||
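Worked through with made-up sizes, the 3d branch above expects, for a mat of shape (G, K, N) = (4, 4096, 1024): blocked_scale_K = round_up(4096/32, 4) = 128 and blocked_scale_N = round_up(1024, 128) = 1024, so the scale must be a 2d tensor of shape (4, 128 * 1024) = (4, 131072).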
|  | ||||
| void check_scale(const Tensor& mat, const Tensor& scale, const int dim, const int arg_idx, const int scale_multiplier=1) { | ||||
|   bool using_fp8_rowwise = scale.scalar_type() == kFloat; | ||||
|   bool using_mxfp8 = scale.scalar_type() == at::kFloat8_e8m0fnu; | ||||
|   if (using_fp8_rowwise) { | ||||
|     _check_scales_fp8_rowwise(mat, scale, dim, arg_idx, scale_multiplier); | ||||
|   } else if (using_mxfp8) { | ||||
|     _check_scales_mxfp8(mat, scale, dim, arg_idx); | ||||
|   } else { | ||||
|     TORCH_CHECK(false, "scale must be float32 or float8_e8m0fnu, but got ", scale.dtype()); | ||||
|   } | ||||
| } | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| Tensor | ||||
| _scaled_grouped_mm_cuda( | ||||
|         const Tensor& mat_a, | ||||
|         const Tensor& mat_b, | ||||
|         const Tensor& scale_a, | ||||
|         const Tensor& scale_b, | ||||
|         const std::optional<at::Tensor>& offs, | ||||
|         const std::optional<at::Tensor>& bias, | ||||
|         const std::optional<at::Tensor>& scale_result, | ||||
|         std::optional<c10::ScalarType> out_dtype, | ||||
|         bool use_fast_accum) { | ||||
|   bool allowed_device = _scaled_mm_allowed_device(/*sm90_only*/true, /*sm100_only*/true); | ||||
|   TORCH_CHECK_VALUE(allowed_device, "torch._scaled_grouped_mm is only supported on CUDA devices with compute capability = [9.0, 10.0], or ROCm MI300+"); | ||||
|  | ||||
|   TORCH_CHECK_VALUE(!check_valid_strides_and_return_transposed(mat_a), "Expected mat1 to not be transposed"); | ||||
|   TORCH_CHECK_VALUE(check_valid_strides_and_return_transposed(mat_b), "Expected mat2 to be transposed"); | ||||
|   TORCH_CHECK_VALUE(mat_a.dim() == 2 || mat_a.dim() == 3, "mat_a has to be 2 or 3d"); | ||||
|   TORCH_CHECK_VALUE(mat_b.dim() == 2 || mat_b.dim() == 3, "mat_b has to be 2 or 3d"); | ||||
|   const bool a_is_2d = mat_a.dim() == 2; | ||||
|   const bool b_is_2d = mat_b.dim() == 2; | ||||
|  | ||||
|   // NOTE(slayton): For sub-1B formats want contraction_dim argument? | ||||
|   if (!a_is_2d || !b_is_2d) { | ||||
|     TORCH_CHECK_VALUE(mat_a.size(-1) == mat_b.size(-2), "contraction dimension of mat_a and mat_b must match"); | ||||
|   } | ||||
|   TORCH_CHECK_VALUE( | ||||
|     mat_a.size(-1) % 16 == 0, | ||||
|     "Expected trailing dimension of mat_a to be divisible by 16 ", | ||||
|     "but got mat1 shape: (", | ||||
|     mat_a.sizes(), | ||||
|     ")."); | ||||
|   TORCH_CHECK_VALUE(mat_b.size(-2) % 16 == 0 && mat_b.size(-1) % 16 == 0, | ||||
|     "Expected mat_b shape to be divisible by 16 ", | ||||
|     "but got mat_b shape: (", | ||||
|     mat_b.sizes(), | ||||
|     ")."); | ||||
|  | ||||
|  | ||||
|   TORCH_CHECK_VALUE(!bias.has_value(), "Bias not supported yet"); | ||||
|   TORCH_CHECK_VALUE(!scale_result.has_value(), "Scale result not supported yet"); | ||||
|   TORCH_CHECK_VALUE(offs.has_value() ==  (a_is_2d || b_is_2d), "Have to provide offsets if there is a 2d matrix"); | ||||
|  | ||||
|   // NOTE: mxfp8 x mxfp8 requires (and asserts later) that offsets is present. | ||||
|   //       for rowwise, no offsets implies 3d-3d and is handled by lower-level | ||||
|   //       routines | ||||
|   if (offs.has_value()) { | ||||
|     TORCH_CHECK_VALUE(offs->dim() == 1, "offs has to be 1D"); | ||||
|     TORCH_CHECK_VALUE(offs->dtype() == at::kInt, "Offsets have to be int32"); | ||||
|   } | ||||
|   // FP8 per-tensor and per-row scaling expect fp32 scales. | ||||
|   // MXFP8 expects float8_e8m0fnu scales. | ||||
|   TORCH_CHECK_VALUE( | ||||
|       (scale_a.scalar_type() == kFloat && scale_b.scalar_type() == kFloat) || | ||||
|       (scale_a.scalar_type() == at::kFloat8_e8m0fnu && scale_b.scalar_type() == at::kFloat8_e8m0fnu), | ||||
|       "For FP8 tensorwise and rowwise, both scales must both be float32 tensors. For MXFP8, scales must both be float8_e8m0fnu tensors."); | ||||
|  | ||||
|   const int scale_multiplier = (mat_a.dim() == 2 && mat_b.dim() == 2) ? offs->size(0) : 1; | ||||
|   check_scale(mat_a, scale_a, 0, 0, scale_multiplier); | ||||
|   check_scale(mat_b, scale_b, 1, 1, scale_multiplier); | ||||
|  | ||||
|   const auto out_dtype_ = out_dtype.value_or(kBFloat16); | ||||
|   TORCH_CHECK_VALUE(out_dtype_ == kBFloat16, "Only bf16 high precision output types are supported for grouped gemm"); | ||||
|  | ||||
|   Tensor out = create_grouped_gemm_output_tensor(mat_a, mat_b, offs, out_dtype_); | ||||
|  | ||||
| #if defined(USE_FBGEMM_GENAI) && defined(USE_CUDA) && !defined(USE_ROCM) | ||||
|   // MXFP8 grouped GEMM dispatching | ||||
|   bool is_mx8mx8bf16 = ( | ||||
|     mat_a.scalar_type() == at::kFloat8_e4m3fn && mat_b.scalar_type() == at::kFloat8_e4m3fn && | ||||
|     scale_a.scalar_type() == at::kFloat8_e8m0fnu && scale_b.scalar_type() == at::kFloat8_e8m0fnu | ||||
|   ); | ||||
| #else | ||||
|   bool is_mx8mx8bf16 = false; | ||||
| #endif | ||||
|  | ||||
|   if (is_mx8mx8bf16) { | ||||
|     // Note: Passing implied SwizzleType here, correctness of scale previously checked | ||||
|     //       in `check_scale` call | ||||
|     return _mx8_mx8_bf16_grouped_mm_fbgemm( | ||||
|         mat_a, | ||||
|         mat_b, | ||||
|         scale_a, | ||||
|         SwizzleType::SWIZZLE_32_4_4, | ||||
|         scale_b, | ||||
|         SwizzleType::SWIZZLE_32_4_4, | ||||
|         offs.value(), | ||||
|         out); | ||||
|   } | ||||
|  | ||||
|   // If we're not MXFP8, then we're row-wise scaling. | ||||
|   return _f8_f8_bf16_rowwise_grouped_mm( | ||||
|       mat_a, | ||||
|       mat_b, | ||||
|       scale_a, | ||||
|       scale_b, | ||||
|       offs, | ||||
|       bias, | ||||
|       use_fast_accum, | ||||
|       out); | ||||
| } | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| using acceptance_fn = std::function<bool(c10::ScalarType, std::vector<ScalingType>&, ArrayRef<Tensor>&, c10::ScalarType, std::vector<ScalingType>&, ArrayRef<Tensor>&)>; | ||||
|  | ||||
| std::array<std::tuple<std::string, acceptance_fn, ScaledGemmImplementation>, 2> scale_grouped_kernel_dispatch = {{ | ||||
|   { "rowwise_rowwise", scaled_blas::check_rowwise_recipe, ScaledGemmImplementation::ROWWISE_ROWWISE}, | ||||
|   { "mxfp8_mxfp8", scaled_blas::check_mxfp8_recipe, ScaledGemmImplementation::MXFP8_MXFP8}}}; | ||||
|  | ||||
| } // anonymous namespace | ||||
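The dispatch table is deliberately sparse; supporting another scaling combination would mean registering one more (name, acceptance_fn, implementation) entry plus a matching case in the switch of _scaled_grouped_mm_cuda_v2 below. A hypothetical sketch (the mxfp4 entry is illustrative only; this diff does not add it):

// Hypothetical extension of the dispatch table with an MXFP4 recipe.
std::array<std::tuple<std::string, acceptance_fn, ScaledGemmImplementation>, 3>
    scale_grouped_kernel_dispatch_extended = {{
  { "rowwise_rowwise", scaled_blas::check_rowwise_recipe, ScaledGemmImplementation::ROWWISE_ROWWISE},
  { "mxfp8_mxfp8",     scaled_blas::check_mxfp8_recipe,   ScaledGemmImplementation::MXFP8_MXFP8},
  { "mxfp4_mxfp4",     scaled_blas::check_mxfp4_recipe,   ScaledGemmImplementation::MXFP4_MXFP4}}};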
|  | ||||
| Tensor | ||||
| _scaled_grouped_mm_cuda_v2( | ||||
|           const Tensor& mat_a, const Tensor& mat_b, | ||||
|           ArrayRef<Tensor> scale_a, | ||||
|           IntArrayRef scale_recipe_a, | ||||
|           IntArrayRef swizzle_a, | ||||
|           ArrayRef<Tensor> scale_b, | ||||
|           IntArrayRef scale_recipe_b, | ||||
|           IntArrayRef swizzle_b, | ||||
|           const std::optional<Tensor>& offs, | ||||
|           const std::optional<Tensor>& bias, | ||||
|           const std::optional<c10::ScalarType> out_dtype, | ||||
|           IntArrayRef contraction_dim, | ||||
|           bool use_fast_accum) { | ||||
|   bool allowed_device = _scaled_mm_allowed_device(/*sm90_only*/true, /*sm100_only*/true); | ||||
|   TORCH_CHECK_VALUE(allowed_device, "torch._scaled_grouped_mm is only supported on CUDA devices with compute capability = [9.0, 10.0], or ROCm MI300+"); | ||||
|  | ||||
|   TORCH_CHECK_VALUE(!check_valid_strides_and_return_transposed(mat_a), "Expected mat1 to not be transposed"); | ||||
|   TORCH_CHECK_VALUE(check_valid_strides_and_return_transposed(mat_b), "Expected mat2 to be transposed"); | ||||
|   TORCH_CHECK_VALUE(mat_a.dim() == 2 || mat_a.dim() == 3, "mat_a has to be 2 or 3d"); | ||||
|   TORCH_CHECK_VALUE(mat_b.dim() == 2 || mat_b.dim() == 3, "mat_b has to be 2 or 3d"); | ||||
|   const bool a_is_2d = mat_a.dim() == 2; | ||||
|   const bool b_is_2d = mat_b.dim() == 2; | ||||
|  | ||||
|   // NOTE(slayton): For sub-1B formats want contraction_dim argument? | ||||
|   if (!a_is_2d || !b_is_2d) { | ||||
|     if (contraction_dim.size() > 0) { | ||||
|       const int dim_a = contraction_dim[0], dim_b = contraction_dim[1]; | ||||
|       TORCH_CHECK_VALUE(mat_a.size(dim_a) == mat_b.size(dim_b), | ||||
|           "Contraction dimensions (", dim_a, ",", dim_b, ") of mat_a and mat_b must match, got: ", mat_a.size(dim_a), " and ", | ||||
|           mat_b.size(dim_b)); | ||||
|       // Note: only (-1, -2) is currently supported | ||||
|       TORCH_CHECK_VALUE(dim_a == -1 && dim_b == -2, "Currently contraction dims must be (-1, -2) only"); | ||||
|     } else { | ||||
|       TORCH_CHECK_VALUE(mat_a.size(-1) == mat_b.size(-2), "contraction dimension of mat_a and mat_b must match"); | ||||
|     } | ||||
|   } | ||||
|   TORCH_CHECK_VALUE( | ||||
|     mat_a.size(-1) % 16 == 0, | ||||
|     "Expected trailing dimension of mat_a to be divisible by 16 ", | ||||
|     "but got mat1 shape: (", | ||||
|     mat_a.sizes(), | ||||
|     ")."); | ||||
|   TORCH_CHECK_VALUE(mat_b.size(-2) % 16 == 0 && mat_b.size(-1) % 16 == 0, | ||||
|     "Expected mat_b shape to be divisible by 16 ", | ||||
|     "but got mat_b shape: (", | ||||
|     mat_b.sizes(), | ||||
|     ")."); | ||||
|  | ||||
|   TORCH_CHECK_VALUE(!bias.has_value(), "Bias not supported yet"); | ||||
|   TORCH_CHECK_VALUE(offs.has_value() ==  (a_is_2d || b_is_2d), "Have to provide offsets if there is a 2d matrix"); | ||||
|  | ||||
|   // NOTE: mxfp8 x mxfp8 requires (and asserts later) that offsets is present. | ||||
|   //       for rowwise, no offsets implies 3d-3d and is handled by lower-level | ||||
|   //       routines | ||||
|   if (offs.has_value()) { | ||||
|     TORCH_CHECK_VALUE(offs->dim() == 1, "offs has to be 1D"); | ||||
|     TORCH_CHECK_VALUE(offs->dtype() == at::kInt, "Offsets have to be int32"); | ||||
|   } | ||||
|  | ||||
|   const auto out_dtype_ = out_dtype.value_or(kBFloat16); | ||||
|   TORCH_CHECK_VALUE(out_dtype_ == kBFloat16, "Only bf16 high precision output types are supported for grouped gemm"); | ||||
|  | ||||
|   Tensor out = create_grouped_gemm_output_tensor(mat_a, mat_b, offs, out_dtype_); | ||||
|  | ||||
|   // Conversion of implicitly-defined enums to explicit | ||||
|   auto scale_recipe_a_enum = convert_int_to_enum<ScalingType>(scale_recipe_a); | ||||
|   auto swizzle_a_enum = convert_int_to_enum<SwizzleType>(swizzle_a); | ||||
|   auto scale_recipe_b_enum = convert_int_to_enum<ScalingType>(scale_recipe_b); | ||||
|   auto swizzle_b_enum = convert_int_to_enum<SwizzleType>(swizzle_b); | ||||
|  | ||||
|   // at this point we can start working out what we want to be doing | ||||
|   // Try to do as few steps as possible. | ||||
|   // NOTE: support is deliberately sparse, can explicitly enumerate all combinations allowed. | ||||
|   // Do this via a list of defined (name, acceptance, concrete_impl) tuples. | ||||
|   ScaledGemmImplementation gemm_impl = ScaledGemmImplementation::NONE; | ||||
|   for (const auto& fn_entry : scale_grouped_kernel_dispatch) { | ||||
|     const auto [name, accept_fn, scaled_gemm_impl] = fn_entry; | ||||
|     bool ok = accept_fn(mat_a.scalar_type(), | ||||
|                         scale_recipe_a_enum, | ||||
|                         scale_a, | ||||
|                         mat_b.scalar_type(), | ||||
|                         scale_recipe_b_enum, | ||||
|                         scale_b); | ||||
|     if (ok) { | ||||
|       gemm_impl = scaled_gemm_impl; | ||||
|       break; | ||||
|     } | ||||
|   } | ||||
|   TORCH_CHECK_VALUE(gemm_impl != ScaledGemmImplementation::NONE, | ||||
|       "No gemm implementation was found"); | ||||
|  | ||||
|   switch (gemm_impl) { | ||||
|     case ScaledGemmImplementation::ROWWISE_ROWWISE: { | ||||
|       const int scale_multiplier = (mat_a.dim() == 2 && mat_b.dim() == 2) ? offs->size(0) : 1; | ||||
|       _check_scales_fp8_rowwise(mat_a, scale_a[0], 0 /* dim */ , 0 /* arg_idx */, scale_multiplier); | ||||
|       _check_scales_fp8_rowwise(mat_b, scale_b[0], 1 /* dim */ , 1 /* arg_idx */, scale_multiplier); | ||||
|       return _f8_f8_bf16_rowwise_grouped_mm( | ||||
|           mat_a, | ||||
|           mat_b, | ||||
|           scale_a[0], | ||||
|           scale_b[0], | ||||
|           offs, | ||||
|           bias, | ||||
|           use_fast_accum, | ||||
|           out); | ||||
|     } | ||||
|     case ScaledGemmImplementation::MXFP8_MXFP8: { | ||||
|       _check_scales_mxfp8(mat_a, scale_a[0], 0 /* dim */, 0 /* arg_idx */); | ||||
|       _check_scales_mxfp8(mat_b, scale_b[0], 1 /* dim */, 1 /* arg_idx */); | ||||
|       return _mx8_mx8_bf16_grouped_mm_fbgemm( | ||||
|           mat_a, | ||||
|           mat_b, | ||||
|           scale_a[0], | ||||
|           swizzle_a_enum[0], | ||||
|           scale_b[0], | ||||
|           swizzle_b_enum[0], | ||||
|           offs.value(), | ||||
|           out); | ||||
|     } | ||||
|     default: | ||||
|       TORCH_CHECK_NOT_IMPLEMENTED(false, | ||||
|           "_scaled_grouped_mm_cuda_v2 is in an inconsistent state - should never reach here"); | ||||
|   } | ||||
| } | ||||
|  | ||||
| Tensor _grouped_mm_cuda(const Tensor& mat_a, const Tensor& mat_b, | ||||
| const std::optional<at::Tensor>& offs, | ||||
| const std::optional<at::Tensor>& bias, | ||||
| std::optional<c10::ScalarType> out_dtype) { | ||||
|   _grouped_mm_validate_inputs(mat_a, mat_b, offs, bias, out_dtype); | ||||
|   bool a_b_and_out_are_bf16 = ( | ||||
|     mat_a.dtype() == at::kBFloat16 && | ||||
|     mat_b.dtype() == at::kBFloat16 && | ||||
|     out_dtype.value_or(at::kBFloat16) == at::kBFloat16 | ||||
|   ); | ||||
| #ifndef USE_ROCM | ||||
|   bool use_fast_path = _scaled_mm_allowed_device(/*sm90_only*/true, /*sm100_only*/true) && a_b_and_out_are_bf16; | ||||
| #else | ||||
|   // _scaled_mm_allowed_device is used here within _grouped_mm_cuda which seems incorrect since scale is not used. | ||||
|   // the _grouped_mm_fallback should be safe for any ROCm GPU since it's just calling typical mm/bmm | ||||
|   bool use_fast_path = false; | ||||
| #endif | ||||
|   const auto out_dtype_ = _resolve_grouped_mm_out_dtype(mat_a, mat_b, out_dtype); | ||||
|   Tensor out = create_grouped_gemm_output_tensor(mat_a, mat_b, offs, out_dtype_); | ||||
|   if (use_fast_path) { | ||||
|     // fast path, no d2h sync needed | ||||
|     at::cuda::detail::bf16bf16_grouped_mm(mat_a, mat_b, offs, bias, out); | ||||
|   } else { | ||||
|     _grouped_mm_fallback(mat_a, mat_b, offs, bias, out_dtype, out); | ||||
|   } | ||||
|   return out; | ||||
| } | ||||
|  | ||||
| } // namespace at::native | ||||
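For reference, the fallback path taken when the fast bf16 kernel is unavailable conceptually just loops ordinary matmuls over the groups described by offs. A rough illustration of that idea for the 2d-3d case (this is not the actual _grouped_mm_fallback implementation; the shapes and names are assumptions):

#include <ATen/ATen.h>

// mat_a: (total_M, K) split along dim 0 by cumulative `offs`,
// mat_b: (G, K, N), out: (total_M, N).
void grouped_mm_loop_sketch(const at::Tensor& mat_a, const at::Tensor& mat_b,
                            const at::Tensor& offs, at::Tensor& out) {
  auto offs_cpu = offs.cpu();      // d2h sync, which the fast path avoids
  int64_t start = 0;
  for (int64_t g = 0; g < offs_cpu.numel(); ++g) {
    const int64_t end = offs_cpu[g].item<int64_t>();
    if (end > start) {
      // rows [start, end) of A are multiplied by group g of B
      out.narrow(0, start, end - start)
         .copy_(at::mm(mat_a.narrow(0, start, end - start), mat_b[g]));
    }
    start = end;
  }
}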
| @ -6,7 +6,7 @@ | ||||
| #endif | ||||
|  | ||||
| // ROCm 6.3 is planned to have these functions, but until then here they are. | ||||
| #if defined(USE_ROCM) && ROCM_VERSION >= 60201 | ||||
| #if defined(USE_ROCM) | ||||
| #include <device_functions.h> | ||||
| #include <hip/hip_fp16.h> | ||||
| #include <hip/hip_bf16.h> | ||||
| @ -115,9 +115,7 @@ __device__ __forceinline__ void fastSpecializedAtomicAdd( | ||||
|     index_t index, | ||||
|     const index_t numel, | ||||
|     scalar_t value) { | ||||
| #if (                      \ | ||||
|     (defined(USE_ROCM) && ROCM_VERSION < 60201) || \ | ||||
|     (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700))) | ||||
| #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)) | ||||
|   gpuAtomicAddNoReturn( | ||||
|       reinterpret_cast<at::Half*>(tensor) + index, | ||||
|       static_cast<at::Half>(value)); | ||||
| @ -160,9 +158,7 @@ __device__ __forceinline__ void fastSpecializedAtomicAdd( | ||||
|     index_t index, | ||||
|     const index_t numel, | ||||
|     scalar_t value) { | ||||
| #if (                      \ | ||||
|     (defined(USE_ROCM) && ROCM_VERSION < 60201) || \ | ||||
|     (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))) | ||||
| #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)) | ||||
|   gpuAtomicAddNoReturn( | ||||
|       reinterpret_cast<at::BFloat16*>(tensor) + index, | ||||
|       static_cast<at::BFloat16>(value)); | ||||
|  | ||||
| @ -154,7 +154,7 @@ REGISTER_CUDA_DISPATCH(lstsq_stub, &lazy_lstsq_kernel) | ||||
|  | ||||
| // Old style dispatches | ||||
| // torch_cuda_linalg dynamic library should have a global constructor | ||||
| // that calls regiserLinaglDispatch so in order ot lazy bind | ||||
| // that calls registerLinalgDispatch so in order ot lazy bind | ||||
| // old style dispatch all one have to do is to load library and call disp.func_name | ||||
| // Protect from infinite recursion by initializing dispatch to self and checking | ||||
| // that values are different after linalg library were loaded | ||||
|  | ||||
| @ -121,7 +121,7 @@ __device__ scalar_t reduce(Op op, PTA tensor, int plane) { | ||||
|     for (int x = threadIdx.x; x < tensor.size(2); x += blockDim.x*UNRL) { | ||||
| #pragma unroll | ||||
|       for (int u = 0; u < UNRL; u++) | ||||
|         tmp[u] = op(batch, plane, min((int)tensor.size(2)-1, (int)(x+u*blockDim.x))); | ||||
|         tmp[u] = op(batch, plane, std::min((int)tensor.size(2)-1, (int)(x+u*blockDim.x))); | ||||
| #pragma unroll | ||||
|       for (int u = 0; u < UNRL; u++) | ||||
|         if (x+u*blockDim.x < tensor.size(2)) | ||||
| @ -306,6 +306,22 @@ __global__ void batch_norm_collect_statistics_kernel( | ||||
|   stat_accscalar_t var_n = 0; | ||||
|   int n = 0; | ||||
|   for (int batch = threadIdx.y; batch < input.size(0); batch += blockDim.y) { | ||||
| #if defined(USE_ROCM) | ||||
|     constexpr int UNRL = 4; | ||||
|     stat_accscalar_t v_[UNRL]; | ||||
|     for (int x = threadIdx.x; x < input.size(2); x += blockDim.x*UNRL) { | ||||
|       for (int u = 0; u < UNRL; u++) | ||||
|         v_[u] = input[batch][plane][std::min(x+u*blockDim.x, input.size(2)-1)]; | ||||
|       for (int u = 0; u < UNRL; u++) { | ||||
|         if (x+u*blockDim.x < input.size(2)) { | ||||
|           stat_accscalar_t d1 = v_[u] - avg; | ||||
|           n++; | ||||
|           avg += d1 / n; | ||||
|           var_n += d1 * (v_[u] - avg); | ||||
|         } | ||||
|       } | ||||
|     } | ||||
| #else | ||||
|     for (int x = threadIdx.x; x < input.size(2); x += blockDim.x) { | ||||
|       stat_accscalar_t v = input[batch][plane][x]; | ||||
|       stat_accscalar_t d1 = v - avg; | ||||
| @ -313,6 +329,7 @@ __global__ void batch_norm_collect_statistics_kernel( | ||||
|       avg += d1 / n; | ||||
|       var_n += d1 * (v - avg); | ||||
|     } | ||||
| #endif | ||||
|   } | ||||
|  | ||||
|   // first warpSum to get one value per thread to | ||||
|  | ||||
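The statistics loop above is Welford's online mean/variance update; the ROCm branch merely unrolls it by `UNRL`, loading with a clamped index and applying the update only for in-range elements. A host-only sketch of the same recurrence, with the two-pass reference values noted in a comment:

```cuda
// Host-only sketch of the Welford update used in the loop above: the mean and the
// running sum of squared deviations (var_n) are updated one sample at a time.
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> xs = {1.0, 2.0, 4.0, 8.0};
  double avg = 0.0, var_n = 0.0;
  int n = 0;
  for (double v : xs) {
    double d1 = v - avg;
    ++n;
    avg += d1 / n;
    var_n += d1 * (v - avg);   // uses the *updated* avg, exactly as in the kernel
  }
  // Two-pass reference for {1,2,4,8}: mean = 3.75, population variance = 7.1875
  std::printf("mean=%g  var=%g\n", avg, var_n / n);
  return 0;
}
```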
1284  aten/src/ATen/native/cuda/ScaledBlas.cpp  (new file; diff suppressed because it is too large)
							| @ -43,6 +43,12 @@ std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda( | ||||
|   TORCH_CHECK(k >= 1 && k <= slicesize, | ||||
|               "kthvalue(): selected number k out of range for dimension ", dim); | ||||
|  | ||||
|   TORCH_CHECK( | ||||
|       slicesize <= std::numeric_limits<int32_t>::max(), | ||||
|       "kthvalue(): dimension ", dim, " is too large (", slicesize, | ||||
|       "). The current CUDA implementation supports dimension sizes up to ", | ||||
|       std::numeric_limits<int32_t>::max()); | ||||
|  | ||||
|   at::assert_no_overlap(self, values); | ||||
|  | ||||
|   _reduction_with_indices_allocate_or_resize_output( | ||||
| @ -163,10 +169,6 @@ std::tuple<Tensor&, Tensor&> kthvalue_out_cuda( | ||||
|     bool keepdim, | ||||
|     Tensor& values, | ||||
|     Tensor& indices) { | ||||
|   // See note [Writing Nondeterministic Operations] | ||||
|   // If there are duplicate elements of the kth value, the procedure for choosing which | ||||
|   // of the duplicates to use for the indices output is nondeterministic. | ||||
|   at::globalContext().alertNotDeterministic("kthvalue CUDA"); | ||||
|   auto result = [&]() { | ||||
|     NoNamesGuard guard; | ||||
|     // `kthvalue_out_impl_cuda` expects contiguous in input `self`. | ||||
|  | ||||
| @ -65,25 +65,34 @@ __global__ void gatherKthValue( | ||||
|       &kValue); | ||||
|  | ||||
|   // Find the index of the k-th highest element | ||||
|   index_t kValueIndex = 0; | ||||
|   bool foundKValue = false; | ||||
|   __shared__ int32_t minIndexFound; | ||||
|  | ||||
|   if (threadIdx.x == 0) { | ||||
|       minIndexFound = static_cast<int32_t>(inputSliceSize); | ||||
|   } | ||||
|   __syncthreads(); | ||||
|  | ||||
|   for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { | ||||
|     bool inRange = (i < inputSliceSize); | ||||
|     scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) | ||||
|                          : static_cast<scalar_t>(0); | ||||
|     bool isKValue = inRange && | ||||
|         ((v == kValue) || (at::_isnan(v) && at::_isnan(kValue))); | ||||
|     if (isKValue) { | ||||
|       kValueIndex = i; | ||||
|       foundKValue = true; | ||||
|       break; | ||||
|     } | ||||
|       // Early exit based on best-so-far | ||||
|       if (i >= minIndexFound) { | ||||
|           break; | ||||
|       } | ||||
|  | ||||
|       scalar_t v = doLdg(&inputSliceStart[i * inputWithinSliceStride]); | ||||
|       bool isKValue = | ||||
|           ((v == kValue) || (at::_isnan(v) && at::_isnan(kValue))); | ||||
|  | ||||
|       if (isKValue) { | ||||
|           atomicMin(&minIndexFound, static_cast<int32_t>(i)); | ||||
|           break; | ||||
|       } | ||||
|   } | ||||
|  | ||||
|   if (foundKValue) { | ||||
|     kthValueSliceStart[0] = kValue; | ||||
|     indicesSliceStart[0] = kValueIndex; | ||||
|   __syncthreads(); | ||||
|  | ||||
|   if (threadIdx.x == 0) { | ||||
|       indicesSliceStart[0] = static_cast<index_t>(minIndexFound); | ||||
|       kthValueSliceStart[0] = kValue; | ||||
|   } | ||||
| } | ||||
|  | ||||
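The rewritten search makes the reported index deterministic: every thread that hits the k-th value races through `atomicMin` on a shared slot, so the smallest matching index always wins, which appears to be why the `alertNotDeterministic` call was removed from `kthvalue_out_cuda` above. A single-block sketch of that pattern on toy data (not the ATen kernel; names are made up):

```cuda
// Single-block sketch of the "smallest matching index" pattern: candidates race
// through atomicMin on a shared slot, so the winner is always the lowest index.
#include <cstdio>

__global__ void min_matching_index(const int* data, int n, int target, int* out) {
  __shared__ int min_idx;
  if (threadIdx.x == 0) min_idx = n;      // "not found" sentinel, like inputSliceSize above
  __syncthreads();

  for (int i = threadIdx.x; i < n; i += blockDim.x) {
    if (i >= min_idx) break;              // a smaller index has already been found
    if (data[i] == target) {
      atomicMin(&min_idx, i);
      break;
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) *out = min_idx;
}

int main() {
  const int n = 1000;
  int *data, *out;
  cudaMallocManaged(&data, n * sizeof(int));
  cudaMallocManaged(&out, sizeof(int));
  for (int i = 0; i < n; ++i) data[i] = i % 7;            // value 3 first appears at index 3
  min_matching_index<<<1, 128>>>(data, n, /*target=*/3, out);
  cudaDeviceSynchronize();
  std::printf("first matching index: %d\n", *out);        // prints 3 on every run
  return 0;
}
```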
|  | ||||
| @ -127,6 +127,29 @@ __global__ void upsample_bilinear2d_nhwc_out_frame( | ||||
|   } | ||||
| } | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
| // Helper function to compute output pixel range that can contribute to input pixel | ||||
| template <typename accscalar_t> | ||||
| __device__ __forceinline__ void compute_output_range( | ||||
|     int input_pos, | ||||
|     accscalar_t scale, | ||||
|     int output_size, | ||||
|     bool align_corners, | ||||
|     int& min_output, | ||||
|     int& max_output) { | ||||
|   accscalar_t lo, hi; | ||||
|   if (align_corners) { | ||||
|       lo = static_cast<accscalar_t>(input_pos - 1) / scale; | ||||
|       hi = static_cast<accscalar_t>(input_pos + 1) / scale; | ||||
|   } else { | ||||
|       lo = (input_pos - static_cast<accscalar_t>(0.5)) / scale - static_cast<accscalar_t>(0.5); | ||||
|       hi = (input_pos + static_cast<accscalar_t>(1.5)) / scale - static_cast<accscalar_t>(0.5); | ||||
|   } | ||||
|   min_output = max(0, static_cast<int>(std::ceil(lo))); | ||||
|   max_output = min(output_size - 1, static_cast<int>(std::floor(hi))); | ||||
| } | ||||
| #endif | ||||
|  | ||||
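`compute_output_range` inverts the source-index mapping used by the forward interpolation: it asks which output positions have a stencil that touches a given input position. A short derivation for the `align_corners = false` case, writing `s` for `rheight`/`rwidth`:

```latex
% Forward map used by the kernels (align_corners = false):  src(d) = s (d + 0.5) - 0.5.
% Input index i receives weight from output index d iff floor(src(d)) is i-1 or i, i.e.
%   i - 1 <= src(d) < i + 1.
\[
  i - 1 \;\le\; s\,(d + 0.5) - 0.5 \;<\; i + 1
  \quad\Longleftrightarrow\quad
  \frac{i - 0.5}{s} - 0.5 \;\le\; d \;<\; \frac{i + 1.5}{s} - 0.5 ,
\]
% so min_output = max(0, ceil(lo)) and max_output = min(output_size - 1, floor(hi)),
% matching the lo/hi above. With align_corners = true, src(d) = s d gives the
% (i - 1)/s and (i + 1)/s bounds instead.
```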
| // Backward (adjoint) operation 1 <- 2 (accumulates) | ||||
| template <typename scalar_t, typename accscalar_t> | ||||
| C10_LAUNCH_BOUNDS_1(1024) | ||||
| @ -141,8 +164,74 @@ __global__ void upsample_bilinear2d_backward_out_frame( | ||||
|     const bool align_corners, | ||||
|     scalar_t* __restrict__ idata, | ||||
|     const scalar_t* __restrict__ odata) { | ||||
|   const size_t o_numel = nc * width2 * height2; | ||||
|   // Total number of input elements; the ROCm gather path below runs one (grid-stride) thread per input element. | ||||
|   const size_t i_numel = nc * width1 * height1; | ||||
| #ifdef USE_ROCM | ||||
|   for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < i_numel; | ||||
|        index += blockDim.x * gridDim.x) { | ||||
|     // Decode input pixel coordinates | ||||
|     size_t index_temp = index; | ||||
|     const int w1 = index_temp % width1; | ||||
|     index_temp /= width1; | ||||
|     const int h1 = index_temp % height1; | ||||
|     const size_t nc_idx = index_temp / height1; | ||||
|  | ||||
|     accscalar_t grad_sum = 0; | ||||
|  | ||||
|     // Find range of output pixels that could interpolate from this input pixel | ||||
|     int h2_min, h2_max, w2_min, w2_max; | ||||
|     compute_output_range<accscalar_t>(h1, rheight, height2, align_corners, h2_min, h2_max); | ||||
|     compute_output_range<accscalar_t>(w1, rwidth, width2, align_corners, w2_min, w2_max); | ||||
|  | ||||
|     // Iterate over potential output pixels | ||||
|     for (int h2 = h2_min; h2 <= h2_max; h2++) { | ||||
|       for (int w2 = w2_min; w2 <= w2_max; w2++) { | ||||
|         // Compute source coordinates for this output pixel | ||||
|         const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( | ||||
|             rheight, h2, align_corners, /*cubic=*/false); | ||||
|         const int h1_base = (int)h1r; | ||||
|         const int h1p = (h1_base < height1 - 1) ? 1 : 0; | ||||
|         const accscalar_t h1lambda = h1r - h1_base; | ||||
|         const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; | ||||
|  | ||||
|         const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( | ||||
|             rwidth, w2, align_corners, /*cubic=*/false); | ||||
|         const int w1_base = (int)w1r; | ||||
|         const int w1p = (w1_base < width1 - 1) ? 1 : 0; | ||||
|         const accscalar_t w1lambda = w1r - w1_base; | ||||
|         const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; | ||||
|  | ||||
|         // Check if our input pixel participates in this interpolation and accumulate all weights | ||||
|         // At boundaries, h1p=0 or w1p=0 causes some sampling positions to collapse | ||||
|         // to the same pixel, so we need to accumulate weights from all matching positions | ||||
|         accscalar_t weight = 0; | ||||
|  | ||||
|         // Check all four interpolation positions and accumulate weights | ||||
|         if (h1 == h1_base && w1 == w1_base) { | ||||
|           weight += h0lambda * w0lambda;  // top-left | ||||
|         } | ||||
|         if (h1 == h1_base && w1 == w1_base + w1p) { | ||||
|           weight += h0lambda * w1lambda;  // top-right (may be same as top-left if w1p=0) | ||||
|         } | ||||
|         if (h1 == h1_base + h1p && w1 == w1_base) { | ||||
|           weight += h1lambda * w0lambda;  // bottom-left (may be same as top-left if h1p=0) | ||||
|         } | ||||
|         if (h1 == h1_base + h1p && w1 == w1_base + w1p) { | ||||
|           weight += h1lambda * w1lambda;  // bottom-right (may collapse to other positions) | ||||
|         } | ||||
|  | ||||
|         if (weight > 0) { | ||||
|           const size_t output_idx = nc_idx * height2 * width2 + h2 * width2 + w2; | ||||
|           grad_sum += weight * static_cast<accscalar_t>(odata[output_idx]); | ||||
|         } | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     // Write accumulated gradient (no atomics needed) | ||||
|     idata[index] = static_cast<scalar_t>(grad_sum); | ||||
|   } | ||||
| #else | ||||
|   const size_t o_numel = nc * width2 * height2; | ||||
|   for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < o_numel; | ||||
|        index += blockDim.x * gridDim.x) { | ||||
|     size_t index_temp = index; | ||||
| @ -191,6 +280,7 @@ __global__ void upsample_bilinear2d_backward_out_frame( | ||||
|         static_cast<scalar_t>(h1lambda * w1lambda * d2val), | ||||
|         true); | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
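The ROCm branch turns the backward pass from a scatter over output pixels (each output atomically adding into up to four input pixels) into a gather over input pixels, so every input element has exactly one writer and no atomics are needed; the `weight` accumulation above handles the case where the stencil collapses at a border and both slots land on the same index. A host-only 1-D analogue with linear instead of bilinear weights, showing that the two formulations agree (toy sizes, not the real kernel):

```cuda
// Host-only 1-D analogue of the scatter -> gather rewrite. Both loops produce
// the same grad_input; only the second has a single writer per element.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const int in = 4, out = 8;
  const double s = double(in) / out;                       // analogue of rwidth
  std::vector<double> go(out, 1.0);                        // grad_output
  std::vector<double> gi_scatter(in, 0.0), gi_gather(in, 0.0);

  auto src = [&](int d) { return std::max(s * (d + 0.5) - 0.5, 0.0); };

  // Scatter (CUDA branch): each output adds weighted gradient into up to two
  // inputs, which on the GPU requires atomic adds into grad_input.
  for (int d = 0; d < out; ++d) {
    const double x = src(d);
    const int i0 = int(x), i1 = std::min(i0 + 1, in - 1);
    const double l1 = x - i0, l0 = 1.0 - l1;
    gi_scatter[i0] += l0 * go[d];
    gi_scatter[i1] += l1 * go[d];
  }

  // Gather (ROCm branch): each input sums the weights of every stencil slot that
  // lands on it; at the border both slots may collapse onto the same index.
  // (The real kernel narrows the inner loop with compute_output_range.)
  for (int i = 0; i < in; ++i)
    for (int d = 0; d < out; ++d) {
      const double x = src(d);
      const int i0 = int(x), i1 = std::min(i0 + 1, in - 1);
      const double l1 = x - i0, l0 = 1.0 - l1;
      if (i == i0) gi_gather[i] += l0 * go[d];
      if (i == i1) gi_gather[i] += l1 * go[d];
    }

  for (int i = 0; i < in; ++i)
    std::printf("i=%d  scatter=%g  gather=%g\n", i, gi_scatter[i], gi_gather[i]);
  return 0;
}
```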
| template <typename scalar_t, typename accscalar_t> | ||||
| @ -387,7 +477,6 @@ static void upsample_bilinear2d_backward_out_cuda_template( | ||||
|   // threads are not covering the whole input tensor. | ||||
|   grad_input.zero_(); | ||||
|  | ||||
|   const size_t num_kernels = nbatch * channels * output_height * output_width; | ||||
|   const int num_threads = std::min( | ||||
|       at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); | ||||
|   cudaStream_t stream = at::cuda::getCurrentCUDAStream(); | ||||
| @ -397,6 +486,12 @@ static void upsample_bilinear2d_backward_out_cuda_template( | ||||
|     return; | ||||
|   } | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
|   constexpr bool use_input = true; | ||||
| #else | ||||
|   constexpr bool use_input = false; | ||||
| #endif | ||||
|  | ||||
|   AT_DISPATCH_FLOATING_TYPES_AND2( | ||||
|       at::ScalarType::Half, at::ScalarType::BFloat16, | ||||
|       grad_output_.scalar_type(), "upsample_bilinear2d_backward_out_frame", [&] { | ||||
| @ -414,6 +509,8 @@ static void upsample_bilinear2d_backward_out_cuda_template( | ||||
|       const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( | ||||
|           input_width, output_width, align_corners, scales_w); | ||||
|  | ||||
|       const size_t num_kernels = nbatch * channels * output_height * output_width; | ||||
|  | ||||
|       upsample_bilinear2d_backward_nhwc_out_frame<scalar_t, accscalar_t> | ||||
|           <<<ceil_div(num_kernels, static_cast<size_t>(num_threads)), num_threads, 0, stream>>>( | ||||
|               input_height, | ||||
| @ -444,6 +541,8 @@ static void upsample_bilinear2d_backward_out_cuda_template( | ||||
|       const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( | ||||
|           input_width, output_width, align_corners, scales_w); | ||||
|  | ||||
|       const size_t num_kernels = nbatch * channels * (use_input ? input_height * input_width : output_height * output_width); | ||||
|  | ||||
|       upsample_bilinear2d_backward_out_frame<scalar_t, accscalar_t> | ||||
|           <<<ceil_div(num_kernels, static_cast<size_t>(num_threads)), | ||||
|              num_threads, | ||||
|  | ||||
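Because the ROCm kernel parallelizes over input elements while the CUDA kernel keeps one thread per output element, `num_kernels` is now computed inside each dispatch branch, as in the hunks above. A sketch of just the launch-size arithmetic, with hypothetical sizes (the real code uses `at::ceil_div` and the device's `maxThreadsPerBlock`):

```cuda
// Sketch of the launch-size arithmetic only; sizes and the USE_ROCM switch are
// illustrative, not taken from the real template.
#include <cstdio>

static size_t ceil_div(size_t a, size_t b) { return (a + b - 1) / b; }

int main() {
  const size_t nbatch = 2, channels = 3;
  const size_t in_h = 32, in_w = 32, out_h = 64, out_w = 64;
  const size_t num_threads = 1024;

#ifdef USE_ROCM
  const bool use_input = true;    // gather kernel: one thread per *input* element
#else
  const bool use_input = false;   // scatter kernel: one thread per *output* element
#endif

  const size_t num_kernels =
      nbatch * channels * (use_input ? in_h * in_w : out_h * out_w);
  std::printf("launch %zu blocks of %zu threads\n",
              ceil_div(num_kernels, num_threads), num_threads);
  return 0;
}
```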
171  aten/src/ATen/native/cuda/cuBlasCommonArgs.h  (new file)
| @ -0,0 +1,171 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <ATen/core/Tensor.h> | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| using at::blas::ScalingType; | ||||
| using at::blas::SwizzleType; | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| // TODO: https://github.com/pytorch/pytorch/pull/59380#pullrequestreview-725310492 | ||||
| c10::MaybeOwned<Tensor> inline resolve_conj_if_indicated(const Tensor& tensor, bool resolve_conj) { | ||||
|   if (resolve_conj && tensor.is_conj()) { | ||||
|     return c10::MaybeOwned<Tensor>::owned(tensor.resolve_conj()); | ||||
|   } else { | ||||
|     return c10::MaybeOwned<Tensor>::borrowed(tensor); | ||||
|   } | ||||
| } | ||||
|  | ||||
| c10::MaybeOwned<Tensor> inline prepare_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor, bool transpose_result) { | ||||
|   if (tensor.is_non_overlapping_and_dense()) { // common case | ||||
|       transpose_tensor = tensor.is_contiguous(); | ||||
|       return resolve_conj_if_indicated(tensor, transpose_result ? transpose_tensor : !transpose_tensor); | ||||
|   } | ||||
|   IntArrayRef tensor_strides = tensor.strides(); | ||||
|   IntArrayRef tensor_sizes = tensor.sizes(); | ||||
|   if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) { | ||||
|     transpose_tensor = false; | ||||
|     return resolve_conj_if_indicated(tensor, !transpose_result); | ||||
|   } else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) { | ||||
|     transpose_tensor = true; | ||||
|     return resolve_conj_if_indicated(tensor, transpose_result); | ||||
|   } else { | ||||
|     transpose_tensor = true; | ||||
|     return c10::MaybeOwned<Tensor>::owned(tensor.clone(at::MemoryFormat::Contiguous)); | ||||
|   } | ||||
| } | ||||
|  | ||||
| c10::MaybeOwned<Tensor> inline prepare_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor) { | ||||
|   if (tensor.is_non_overlapping_and_dense()) { // common case | ||||
|       transpose_tensor = tensor.is_contiguous(); | ||||
|       return resolve_conj_if_indicated(tensor, true); | ||||
|   } | ||||
|  | ||||
|   IntArrayRef tensor_strides = tensor.strides(); | ||||
|   IntArrayRef tensor_sizes = tensor.sizes(); | ||||
|   if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) { | ||||
|     transpose_tensor = false; | ||||
|     return resolve_conj_if_indicated(tensor, true); | ||||
|   } else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) { | ||||
|     transpose_tensor = true; | ||||
|     return resolve_conj_if_indicated(tensor, true); | ||||
|   } else { | ||||
|     transpose_tensor = true; | ||||
|     return c10::MaybeOwned<Tensor>::owned(tensor.clone(at::MemoryFormat::Contiguous)); | ||||
|   } | ||||
| } | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| /** | ||||
|  * @brief Prepares matrices for CUBLAS operation | ||||
|  * | ||||
|  * This constructor prepares tensors for CUBLAS | ||||
|  * The main difference is that PyTorch uses row-major as the default and | ||||
|  * CUBLAS expects column-major. | ||||
|  * | ||||
|  * @details | ||||
|  * To enable row-major output while using CUBLAS, | ||||
|  * we use the mathematical identity that (A × B)^T = B^T × A^T. | ||||
|  * | ||||
|  * "Transpose" here follows cuBLAS's (Fortran, column-major) convention: | ||||
|  * T = operand stored row-major, N = operand stored column-major | ||||
|  * | ||||
|  * Example: | ||||
|  * For matrices A (M×K)(row-major) and B (K×N)(row-major): | ||||
|  *   - Standard multiplication: A × B = (M×K) × (K×N) = M×N result (row-major) | ||||
|  *   - Using our transpose trick: (B^T × A^T) = (N×K)(T) × (K×M)(T) = N×M(N) | ||||
|  *   - However, since the output from cuBLAS is column-major, this is | ||||
|  *     equivalent to an output of size MxN row-major, as expected | ||||
|  * | ||||
|  * The transpose flags are derived from the layouts of the passed in tensors | ||||
|  * | ||||
|  * If the operands are in packed float4 format, `k`, `lda` and `ldb` are adjusted | ||||
|  * to their unpacked values to match what cuBLAS expects. | ||||
|  * | ||||
|  * @param mat1 First input matrix | ||||
|  * @param mat2 Second input matrix | ||||
|  * @param c Output matrix (result) | ||||
|  * @param scale_a Optional scaling factor for first matrix | ||||
|  * @param scale_b Optional scaling factor for second matrix | ||||
|  * @param scale_result Optional scaling factor for result | ||||
|  */ | ||||
| struct cublasCommonArgs { | ||||
|   cublasCommonArgs( | ||||
|       const Tensor& mat1, | ||||
|       const Tensor& mat2, | ||||
|       Tensor& c, | ||||
|       const std::optional<Tensor>& scale_a = std::nullopt, | ||||
|       const std::optional<Tensor>& scale_b = std::nullopt, | ||||
|       const std::optional<Tensor>& scale_result = std::nullopt, | ||||
|       const std::optional<ScalingType>& scaling_choice_a = std::nullopt, | ||||
|       const std::optional<ScalingType>& scaling_choice_b = std::nullopt) { | ||||
|     bool transpose_result = false, transpose_a = false, transpose_b = false; | ||||
|     result = prepare_matrix_for_cublas(c, transpose_result); | ||||
|     mata = prepare_matrix_for_cublas(transpose_result ? mat2 : mat1, transpose_a, transpose_result); | ||||
|     matb = prepare_matrix_for_cublas(transpose_result ? mat1 : mat2, transpose_b, transpose_result); | ||||
|  | ||||
|     // Handle scale tensors if provided | ||||
|     if (scale_a && scale_b) { | ||||
|       // Since we return a row-major result by default, the gemm runs as B.T @ A.T; | ||||
|       // check transpose_result to determine whether the scales need to be swapped | ||||
|       scale_mata_ptr = transpose_result ? scale_b->data_ptr() : scale_a->data_ptr(); | ||||
|       scale_mata_dtype = transpose_result ? scale_b->scalar_type() : scale_a->scalar_type(); | ||||
|       scaling_mata_type = transpose_result ? scaling_choice_b : scaling_choice_a; | ||||
|       scale_matb_ptr = transpose_result ? scale_a->data_ptr() : scale_b->data_ptr(); | ||||
|       scale_matb_dtype = transpose_result ? scale_a->scalar_type() : scale_b->scalar_type(); | ||||
|       scaling_matb_type = transpose_result ? scaling_choice_a : scaling_choice_b; | ||||
|     } | ||||
|  | ||||
|     if (scale_result) { | ||||
|       scale_result_ptr = scale_result->data_ptr(); | ||||
|       scale_result_dtype = scale_result->scalar_type(); | ||||
|     } | ||||
|  | ||||
|     // Update transpose flags | ||||
|     if (transpose_result) { | ||||
|       transpose_a = !transpose_a; | ||||
|       transpose_b = !transpose_b; | ||||
|     } | ||||
|  | ||||
|     auto sizes_a = mata->sizes(); | ||||
|     auto sizes_b = matb->sizes(); | ||||
|  | ||||
|     m = sizes_a[transpose_result ? 1 : 0]; | ||||
|     k = sizes_a[transpose_result ? 0 : 1]; | ||||
|     n = sizes_b[transpose_result ? 0 : 1]; | ||||
|     lda = mata->stride((transpose_a == transpose_result) ? 1 : 0); | ||||
|     ldb = matb->stride((transpose_b == transpose_result) ? 1 : 0); | ||||
|     result_ld = result->stride(transpose_result ? 0 : 1); | ||||
|     transa = transpose_a ? mata->is_conj() ? 'c' : 't' : 'n'; | ||||
|     transb = transpose_b ? matb->is_conj() ? 'c' : 't' : 'n'; | ||||
|  | ||||
|     // cuBLAS expects unpacked values of `k`, `lda` and `ldb`, adjust for 4x2 packing | ||||
|     // if the gemm operands are in packed float4 | ||||
|     if (mat1.dtype() == at::kFloat4_e2m1fn_x2 && mat2.dtype() == at::kFloat4_e2m1fn_x2) { | ||||
|       k = k * 2; | ||||
|       lda = lda * 2; | ||||
|       ldb = ldb * 2; | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   // Matrix members | ||||
|   char transa, transb; | ||||
|   int64_t m, n, k; | ||||
|   int64_t lda, ldb, result_ld; | ||||
|   c10::MaybeOwned<Tensor> mata, matb, result; | ||||
|  | ||||
|   // Scale members | ||||
|   void* scale_mata_ptr = nullptr; | ||||
|   void* scale_matb_ptr = nullptr; | ||||
|   void* scale_result_ptr = nullptr; | ||||
|   std::optional<c10::ScalarType> scale_mata_dtype; | ||||
|   std::optional<ScalingType> scaling_mata_type; | ||||
|   std::optional<c10::ScalarType> scale_matb_dtype; | ||||
|   std::optional<ScalingType> scaling_matb_type; | ||||
|   std::optional<c10::ScalarType> scale_result_dtype; | ||||
| }; | ||||
|  | ||||
| } // namespace at::native | ||||
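The `(A x B)^T = B^T x A^T` identity documented above is the standard way to obtain a row-major result from a column-major GEMM. A minimal raw-cuBLAS sketch of the same trick with plain float32 operands, independent of `cublasCommonArgs` and of the scaled-MM paths (link with `-lcublas`):

```cuda
// Row-major C = A * B via column-major SGEMM: ask cuBLAS for B * A with the
// dimensions swapped; the N x M column-major result is exactly C in row-major.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const int M = 2, K = 3, N = 2;
  std::vector<float> A = {1, 2, 3,
                          4, 5, 6};           // M x K, row-major
  std::vector<float> B = {7,  8,
                          9, 10,
                          11, 12};            // K x N, row-major
  std::vector<float> C(M * N, 0.f);           // M x N, row-major output

  float *dA, *dB, *dC;
  cudaMalloc(&dA, A.size() * sizeof(float));
  cudaMalloc(&dB, B.size() * sizeof(float));
  cudaMalloc(&dC, C.size() * sizeof(float));
  cudaMemcpy(dA, A.data(), A.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, B.data(), B.size() * sizeof(float), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.f, beta = 0.f;
  // A row-major (m x n) buffer viewed column-major is its transpose (n x m).
  // So computing B^T * A^T in column-major (an N x M result) writes C row-major.
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              N, M, K,
              &alpha,
              dB, N,      // row-major B viewed column-major: N x K, ld = N
              dA, K,      // row-major A viewed column-major: K x M, ld = K
              &beta,
              dC, N);     // N x M column-major result, ld = N
  cudaMemcpy(C.data(), dC, C.size() * sizeof(float), cudaMemcpyDeviceToHost);

  // Expected: [[58, 64], [139, 154]]
  std::printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);

  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}
```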
| @ -1,4 +1,4 @@ | ||||
| #if (defined(USE_ROCM) && ROCM_VERSION >= 50700) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
| #if defined(USE_ROCM) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
| #include <cuda_bf16.h> | ||||
| #include <cuda_fp16.h> | ||||
| #include <cuda_runtime.h> | ||||
| @ -133,7 +133,7 @@ inline __host__ __device__ uint32_t getAlignmentRoundUp(const void* p) { | ||||
| #define CDNA2_OR_LATER 0 | ||||
| #endif | ||||
|  | ||||
| #if (defined(USE_ROCM) && ROCM_VERSION >= 50700) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
| #if defined(USE_ROCM) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
|  | ||||
| #if defined(USE_ROCM) | ||||
| // TODO: Support RDNA | ||||
| @ -1161,7 +1161,7 @@ at::Tensor _weight_int4pack_mm_cuda( | ||||
|   auto C_final = at::empty( | ||||
|       {m, n}, at::TensorOptions().dtype(at::kBFloat16).device(A.device())); | ||||
|  | ||||
| #if (defined(USE_ROCM) && ROCM_VERSION >= 50700) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
| #if defined(USE_ROCM) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
|   auto stream = at::cuda::getCurrentCUDAStream(); | ||||
| #define RUN_GEMM(WARPS, K_TILES_PER_WARP, Q_GROUP_SIZE, REDUCE_TYPE) \ | ||||
|   do {                                                               \ | ||||
| @ -1327,7 +1327,7 @@ at::Tensor _convert_weight_to_int4pack_cuda( | ||||
|       {nTilesTensor, kSuperTiles, 32, innerKTiles / 2}, | ||||
|       at::TensorOptions().dtype(at::kInt).device(in.device())); | ||||
|  | ||||
| #if (defined(USE_ROCM) && ROCM_VERSION >= 50700) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
| #if defined(USE_ROCM) || ((defined(CUDA_VERSION) && CUDA_VERSION >= 12000) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))) | ||||
|   auto stream = at::cuda::getCurrentCUDAStream(); | ||||
|   dim3 grid(kSuperTiles, nTiles); | ||||
|  | ||||
|  | ||||
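These gates keep the `(!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800))` clause: the host compilation pass, where `__CUDA_ARCH__` is undefined, still compiles the guarded code, while device passes below sm_80 are excluded. A tiny sketch of how that guard behaves (hypothetical kernel, not the int4 code):

```cuda
// Sketch of the guard shape used above: the host pass compiles the branch because
// __CUDA_ARCH__ is undefined there; device passes take it only when targeting sm_80+.
#include <cstdio>

__global__ void which_path(int* flag) {
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)
  *flag = 1;   // taken when this device pass targets sm_80 or newer
#else
  *flag = 0;   // taken on older device passes (the fast kernel is excluded there)
#endif
}

int main() {
  int* flag;
  cudaMallocManaged(&flag, sizeof(int));
  which_path<<<1, 1>>>(flag);
  cudaDeviceSynchronize();
  std::printf("sm_80+ path taken: %d\n", *flag);
  return 0;
}
```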
| @ -1532,7 +1532,7 @@ NvrtcFunction jit_pwise_function( | ||||
|  | ||||
|   std::string file_path; | ||||
|   if (cache_dir.has_value()) { | ||||
|     // Attemps to read from the cache. | ||||
|     // Attempts to read from the cache. | ||||
|     // Cubin name is <kernel name>_arch<major>.<minor>_nvrtc<major>.<minor>_<ptx or sass>_<program length>_<string hash> | ||||
|     // Note that the SHA1 hash used in the file name is NOT the SHA1 hash of the file's contents, | ||||
|     //   because we hash on the CUDA code, but we save the compiled ptx or sass | ||||
|  | ||||
| @ -1346,7 +1346,7 @@ void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) | ||||
|     }); | ||||
|  | ||||
|   if (input.dim() > 2) { | ||||
|     // if upper=true we need to tranpose and conjugate the result tensor | ||||
|     // if upper=true we need to transpose and conjugate the result tensor | ||||
|     // because the cholesky decomposition is stored in the lower triangular part | ||||
|     if (upper) { | ||||
|       input.copy_(result.mH()); | ||||
| @ -1857,7 +1857,7 @@ void geqrf_kernel(const Tensor& input, const Tensor& tau) { | ||||
|  | ||||
|   auto preferred_backend = at::globalContext().linalgPreferredBackend(); | ||||
|   switch (preferred_backend) { | ||||
|   // TODO Investigate whether the following magma bug is still occuring. | ||||
|   // TODO Investigate whether the following magma bug is still occurring. | ||||
|   // It may be the case that geqrf followed by orgqr is wrong for the magma backend | ||||
|   // geqrf_magma currently uses geqrf2_gpu | ||||
|   // | ||||
|  | ||||
| @ -82,7 +82,7 @@ void lu_factor_looped_cusolver(const Tensor& self, const Tensor& pivots, const T | ||||
| #if defined(BUILD_LAZY_CUDA_LINALG) | ||||
| namespace cuda { namespace detail { | ||||
| // This is only used for an old-style dispatches | ||||
| // Please do not add any new entires to it | ||||
| // Please do not add any new entries to it | ||||
| struct LinalgDispatch { | ||||
|    Tensor (*cholesky_solve_helper)(const Tensor& self, const Tensor& A, bool upper); | ||||
| }; | ||||
|  | ||||
| @ -147,7 +147,7 @@ static void check_shape_forward(const Tensor& input, | ||||
| //  blocked format will propagate between layers. Input, output will be in blocked format. | ||||
| // | ||||
| //  For inference case, weight can be prepacked into blocked format by | ||||
| //  (so as to save weight reoder overhead): | ||||
| //  (so as to save weight reorder overhead): | ||||
| //      model = torch.utils.mkldnn.to_mkldnn(model) | ||||
| // | ||||
| //  For training case, grad_output can be CPU tensor or MKLDNN tensor, | ||||
| @ -723,7 +723,7 @@ Tensor _mkldnn_convolution_transpose( | ||||
|   ideep::tensor w = itensor_from_tensor(weight, /*from_const_data_ptr*/true); | ||||
|   if (!weight.is_mkldnn()) { | ||||
|     // mkldnn transposed convolution has weight in logical order of OIHW or OIDHW, | ||||
|     // while PyTorch has IOHW or IODHW, `._tranpose()` switches strides (no memory copy). | ||||
|     // while PyTorch has IOHW or IODHW, `._transpose()` switches strides (no memory copy). | ||||
|     w.transpose_(0, 1); | ||||
|   } | ||||
|  | ||||
|  | ||||
| @ -540,7 +540,7 @@ static void _mkldnn_matmul_i8i8i32_with_primitive( | ||||
|   args.insert({DNNL_ARG_WEIGHTS, expected_weight}); | ||||
|   args.insert({DNNL_ARG_DST, dst}); | ||||
|   args.insert({DNNL_ARG_SCRATCHPAD, scratchpad}); | ||||
|   // Create primitve and execute | ||||
|   // Create primitive and execute | ||||
|   auto primitive = dnnl::matmul(prim_desc); | ||||
|   primitive.execute(ideep::stream::default_stream(), args); | ||||
| } | ||||
|  | ||||
| @ -439,7 +439,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> mkldnn_rnn_la | ||||
| // I. Memory Formats | ||||
| //   a. mkldnn will use plain formats for input, hx/cx, output, hy/cy | ||||
| //      and possibly use blocked formats for weights depending on shape info. | ||||
| //   b. All mkldnn memorys are created (in plain format) as views on ATen tensor, | ||||
| //   b. All mkldnn memories are created (in plain format) as views on ATen tensor, | ||||
| //      the weight reorder (if any) is handled automatically inside ideep (mkldnn bridge) | ||||
| // | ||||
| // II. MKLDNN Primitive Mapping | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.