Enforce PEP 263 for the PyTorch Python codebase (#55346)

Summary:
All Python files containing non-ASCII characters should be correctly annotated with a `# -*- coding: utf-8 -*-` comment.

Delete a number of superfluous UTF-8 characters, most commonly the right single quotation mark U+2019 (’) used in place of the ASCII apostrophe ', for example `Module’s` -> `Module's`.
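As an illustration (a minimal sketch, not the flake8 plugin this PR actually wires up), a scan along these lines finds files the new check would flag:

    import pathlib
    import re

    # PEP 263: the coding declaration must match this pattern on line 1 or 2.
    CODING_RE = re.compile(rb"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")

    for path in pathlib.Path(".").rglob("*.py"):
        data = path.read_bytes()
        if not any(byte > 0x7F for byte in data):
            continue  # pure ASCII file, no declaration needed
        first_two_lines = data.splitlines()[:2]
        if not any(CODING_RE.match(line) for line in first_two_lines):
            print(f"{path}: non-ASCII content but no coding declaration")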

Pull Request resolved: https://github.com/pytorch/pytorch/pull/55346

Reviewed By: samestep

Differential Revision: D27582044

Pulled By: malfet

fbshipit-source-id: c1cd89655915858ff3a41f675cdfffff795a8e44
Author: Nikita Shulga
Date: 2021-04-06 18:29:56 -07:00
Committed by: Facebook GitHub Bot
Parent: 34a7b4aabb
Commit: add49e7e4e

31 changed files with 46 additions and 27 deletions

View File

@@ -13,6 +13,7 @@ ignore =
     # these ignores are from flake8-comprehensions; please fix!
     C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
 per-file-ignores = __init__.py: F401 torch/utils/cpp_extension.py: B950
+optional-ascii-coding = True
 exclude =
     ./.git,
     ./build_code_analyzer,
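For context, a hedged illustration of the rule this flag enables (behavior assumed from the plugin's purpose): a file with non-ASCII content passes only if it declares its encoding, e.g.:

    # -*- coding: utf-8 -*-
    # Without the declaration on line 1, the non-ASCII string below would
    # trip the coding check enabled in the config above:
    GREETING = "naïve café"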

View File

@@ -2,6 +2,7 @@ flake8==3.8.2
 flake8-bugbear==20.1.4
 flake8-comprehensions==3.3.0
 flake8-executable==2.0.4
+git+https://github.com/malfet/flake8-coding.git
 flake8-pyi==20.5.0
 mccabe
 pycodestyle==2.6.0

View File

@@ -150,7 +150,7 @@ The main goal of this process is to rephrase all the commit messages below to ma
 * **Please cleanup, and format commit titles to be readable by the general pytorch user.** [Detailed intructions here](https://fb.quip.com/OCRoAbEvrRD9#HdaACARZZvo)
 * Please sort commits into the following categories (you should not rename the categories!), I tried to pre-sort these to ease your work, feel free to move commits around if the current categorization is not good.
 * Please drop any commits that are not user-facing.
-* If anything is from another domain, leave it in the UNTOPICED section at the end and I’ll come and take care of it.
+* If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it.
 The categories below are as follows:

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import inspect
 from io import BytesIO
 from sys import version_info

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from io import BytesIO
 from sys import version_info
 from textwrap import dedent

View File

@@ -1860,7 +1860,7 @@ class TestFX(JitTestCase):
         traced(5)
         self.assertIn("Call using an FX-traced Module, line 4 of the "
-                      "traced Module’s generated forward function:",
+                      "traced Module's generated forward function:",
                       captured[0])

     def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
@@ -1882,7 +1882,7 @@ class TestFX(JitTestCase):
         captured = traceback.format_exc()
         self.assertNotIn("Call using an FX-traced Module, line 4 of the"
-                         " traced Module’s generated forward function:",
+                         " traced Module's generated forward function:",
                          captured)

     def test_ast_rewriter_rewrites_assert(self):

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import unittest
 import os
 import sys

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import torch
 import numpy as np

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import torch
 import numpy as np

View File

@@ -1,4 +1,5 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
 import bz2
 import datetime

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import unittest
 from tools import print_test_stats

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """Adds docstrings to functions defined in the torch._C"""
 import re

View File

@@ -36,7 +36,7 @@ the job such as the region or stage (dev vs prod).
 **Publish Metrics**:

-Using torchelastic’s metrics API is similar to using python’s logging
+Using torchelastic's metrics API is similar to using python's logging
 framework. You first have to configure a metrics handler before
 trying to add metric data.
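A hedged sketch of that configure-then-publish flow (class and function names follow the `torch.distributed.elastic.metrics` module as I understand it; they may differ by version):

    import torch.distributed.elastic.metrics as metrics

    # Configure a handler first (NullMetricHandler is a no-op sink), then
    # emit data points; the group name "my_app" is an arbitrary example.
    metrics.configure(metrics.NullMetricHandler(), group="my_app")
    metrics.put_metric("queue_depth", 3, "my_app")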

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env/python3
+# -*- coding: utf-8 -*-
 # Copyright (c) Facebook, Inc. and its affiliates.
 # All rights reserved.

View File

@@ -95,7 +95,7 @@ class RendezvousHandler(abc.ABC):
     def num_nodes_waiting(self) -> int:
         """
         Returns number of workers who *arrived late* at
-        the rendezvous barrier, hence weren’t included in the current worker
+        the rendezvous barrier, hence weren't included in the current worker
         group.

         Callers should periodically call this method to check whether
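A hedged sketch of the polling pattern this docstring suggests (`handler` and the interval are assumptions of the sketch, not torch API):

    import time

    def watch_for_stragglers(handler, poll_interval_s: float = 30.0):
        # `handler` is a RendezvousHandler instance obtained elsewhere.
        # A positive count means late arrivals are waiting and the caller
        # may choose to re-rendezvous.
        while True:
            if handler.num_nodes_waiting() > 0:
                return True
            time.sleep(poll_interval_s)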

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-
 # Copyright (c) Facebook, Inc. and its affiliates.
 # All rights reserved.

View File

@@ -101,7 +101,7 @@ class _RemoteModule(nn.Module):
     ``def forward_async(input: Tensor) -> Future[Tensor]:``.

     Args:
-        remote_device (str): Device on the destination worker where we’d like to place this module.
+        remote_device (str): Device on the destination worker where we'd like to place this module.
         The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
         E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
         In addition, the device field can be optional and the default value is "cpu".
@@ -355,7 +355,7 @@ class RemoteModule(_RemoteModule):
     | ``def forward_async(input: Tensor) -> Future[Tensor]:``

     Args:
-        remote_device (str): Device on the destination worker where we’d like to place this module.
+        remote_device (str): Device on the destination worker where we'd like to place this module.
         The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
         E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
         In addition, the device field can be optional and the default value is "cpu".
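A hedged usage sketch of the documented `remote_device` format (assumes the RPC framework is already initialized and a peer named `worker1` exists):

    import torch
    import torch.nn as nn
    from torch.distributed.nn.api.remote_module import RemoteModule

    # Place an nn.Linear(20, 30) on worker1's CPU and call it remotely.
    remote_linear = RemoteModule("worker1/cpu", nn.Linear, args=(20, 30))
    fut = remote_linear.forward_async(torch.randn(128, 20))
    result = fut.wait()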

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

View File

@@ -3,7 +3,7 @@ def _parse_remote_device(remote_device: str):
     Parses the remote device.

     Args:
-        remote_device (str): Device on the destination worker where we’d like to place this module.
+        remote_device (str): Device on the destination worker where we'd like to place this module.
         The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
         E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
         In addition, the device field can be optional and the default value is "cpu".
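For illustration only, a toy re-implementation of the parsing rule described above (this is not torch's internal `_parse_remote_device`):

    def parse_remote_device(remote_device: str):
        # "<workername>/<device>" -> (worker, device); device defaults to "cpu".
        if "/" in remote_device:
            worker, device = remote_device.split("/", 1)
        else:
            worker, device = remote_device, "cpu"
        return worker, device

    assert parse_remote_device("ps0/cuda:0") == ("ps0", "cuda:0")
    assert parse_remote_device("trainer0") == ("trainer0", "cpu")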

View File

@@ -48,7 +48,7 @@ demonstration of these components in action:
     return clamp_1
 """

-The **symbolic tracer** performs “symbolic execution” of the Python
+The **symbolic tracer** performs "symbolic execution" of the Python
 code. It feeds fake values, called Proxies, through the code. Operations
 on theses Proxies are recorded. More information about symbolic tracing
 can be found in the :func:`symbolic_trace` and :class:`Tracer`
@@ -63,13 +63,13 @@ IR is the format on which transformations are applied.
 **Python code generation** is what makes FX a Python-to-Python (or
 Module-to-Module) transformation toolkit. For each Graph IR, we can
-create valid Python code matching the Graph’s semantics. This
+create valid Python code matching the Graph's semantics. This
 functionality is wrapped up in :class:`GraphModule`, which is a
 :class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
 ``forward`` method generated from the Graph.

-Taken together, this pipeline of components (symbolic tracing →
-intermediate representation → transforms → Python code generation)
+Taken together, this pipeline of components (symbolic tracing ->
+intermediate representation -> transforms -> Python code generation)
 constitutes the Python-to-Python transformation pipeline of FX. In
 addition, these components can be used separately. For example,
 symbolic tracing can be used in isolation to capture a form of
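The doc text above describes the trace -> IR -> codegen round trip; here is a hedged, self-contained sketch of it using the public `symbolic_trace` API (module `M` is an invented example):

    import torch
    from torch.fx import symbolic_trace

    class M(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x).clamp(min=0.0, max=1.0)

    gm = symbolic_trace(M())  # symbolic tracing -> Graph IR
    print(gm.graph)           # the intermediate representation
    print(gm.code)            # the regenerated Python forward()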

View File

@@ -488,7 +488,7 @@ class {module_name}(torch.nn.Module):
             # constiuent substrings of the error message
             tb_repr = traceback.format_exc()
             custom_msg = ("Call using an FX-traced Module, "
-                          f"line {err_lineno} of the traced Module’s "
+                          f"line {err_lineno} of the traced Module's "
                           "generated forward function:")
             before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
             marker = "~" * err_line_len + "~~~ <--- HERE"

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import sys
 import torch

View File

@@ -1,4 +1,4 @@
-# coding=utf-8
+# -*- coding: utf-8 -*-
 import math
 import warnings

View File

@@ -1,4 +1,4 @@
-# coding=utf-8
+# -*- coding: utf-8 -*-
 from .module import Module
 from .. import functional as F

View File

@@ -50,7 +50,7 @@ def export(model, args, f, export_params=True, verbose=False, training=TrainingM
         1. ONLY A TUPLE OF ARGUMENTS or torch.Tensor::
-            args = (x, y, z)'
+            "args = (x, y, z)"

         The inputs to the model, e.g., such that ``model(*args)`` is a valid invocation
         of the model. Any non-Tensor arguments will be hard-coded into the exported model;
@@ -60,11 +60,11 @@ def export(model, args, f, export_params=True, verbose=False, training=TrainingM
         2. A TUPLE OF ARGUEMENTS WITH A DICTIONARY OF NAMED PARAMETERS::
-            args = (x,
+            "args = (x,
             {
-            ‘y’: input_y,
-            ‘z’: input_z
-            })
+            'y': input_y,
+            'z': input_z
+            })"

         The inputs to the model are structured as a tuple consisting of
         non-keyword arguments and the last value of this tuple being a dictionary
@@ -82,20 +82,20 @@ def export(model, args, f, export_params=True, verbose=False, training=TrainingM
                 return x
             m = Model()
-            k =torch.randn(2, 3)
-            x = {torch.tensor(1.):torch.randn(2, 3)}
+            k = torch.randn(2, 3)
+            x = {torch.tensor(1.): torch.randn(2, 3)}

         In the previous iteration, the call to export API would look like
-            torch.onnx.export(model, (k, x), ‘test.onnx’)
+            torch.onnx.export(model, (k, x), 'test.onnx')

         This would work as intended. However, the export function
-        would now assume that the x input is intended to represent the optional
+        would now assume that the `x` input is intended to represent the optional
         dictionary consisting of named arguments. In order to prevent this from being
         an issue a constraint is placed to provide an empty dictionary as the last
         input in the tuple args in such cases. The new call would look like this.
-            torch.onnx.export(model, (k, x, {}), ‘test.onnx’)
+            torch.onnx.export(model, (k, x, {}), 'test.onnx')

         f: a file-like object (has to implement fileno that returns a file descriptor)
         or a string containing a file name. A binary Protobuf will be written
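To make the calling conventions above concrete, a hedged sketch using the public `torch.onnx.export` API (`Model` and `model.onnx` are invented names):

    import torch

    class Model(torch.nn.Module):
        def forward(self, x, y):
            return x + y

    m = Model()
    x, y = torch.randn(2, 3), torch.randn(2, 3)

    # Plain positional inputs: pass args as a tuple.
    torch.onnx.export(m, (x, y), "model.onnx")

    # If the last input were a dictionary, append an empty dict so it is
    # not mistaken for the named-arguments dictionary described above:
    # torch.onnx.export(m, (x, y, {}), "model.onnx")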

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from typing import Dict, List
 from ._glob_group import GlobPattern, _GlobGroup

View File

@@ -128,7 +128,7 @@ def quantize_jit(model, qconfig_dict, run_fn, run_args, inplace=False, debug=Fal
     `model`: input float TorchScript model
     `qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and
     qconfig for that module as value, empty key means the qconfig will be applied
-    to whole model unless it’s overwritten by more specific configurations, the
+    to whole model unless it's overwritten by more specific configurations, the
     qconfig for each module is either found in the dictionary or fallback to
     the qconfig of parent module.
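To illustrate the `qconfig_dict` convention described above (a hedged sketch; `sub.linear2` is a hypothetical submodule path):

    from torch.quantization import get_default_qconfig

    # Empty key = model-wide default; more specific keys override it.
    qconfig_dict = {
        "": get_default_qconfig("fbgemm"),
        "sub.linear2": get_default_qconfig("qnnpack"),
    }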

View File

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """End-to-end example to test a PR for regressions:

 $ python -m examples.end_to_end --pr 39850