Fix some typos.

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/75561
Approved by: https://github.com/albanD
Yulv-git
2022-04-11 21:55:59 +00:00
committed by PyTorch MergeBot
parent 8e365fabc9
commit ac2d2e3a3d
28 changed files with 32 additions and 32 deletions


@@ -94,7 +94,7 @@ jobs:
)
docker exec -t "${container_name}" sh -c 'sudo chown -R jenkins . && sudo chown -R jenkins /dev && .jenkins/pytorch/build.sh'
-# !{{ common_android.upload_androind_binary_size("", "")}}
+# !{{ common_android.upload_android_binary_size("", "")}}
- name: Test
# Time out the test phase after 3.5 hours
timeout-minutes: 210


@@ -26,7 +26,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
fi
fi
-# /usr/local/caffe2 is where the cpp bits are installed to in in cmake-only
+# /usr/local/caffe2 is where the cpp bits are installed to in cmake-only
# builds. In +python builds the cpp tests are copied to /usr/local/caffe2 so
# that the test code in .jenkins/test.sh is the same
INSTALL_PREFIX="/usr/local/caffe2"


@@ -23,7 +23,7 @@ import java.util.Locale;
* methods.
*
* <p>When constructing {@code Tensor} objects with {@code data} as an array, it is not specified
-* whether this data is is copied or retained as a reference so it is recommended not to modify it
+* whether this data is copied or retained as a reference so it is recommended not to modify it
* after constructing. {@code data} passed as a {@link Buffer} is not copied, so it can be modified
* between {@link Module} calls to avoid reallocation. Data retrieved from {@code Tensor} objects
* may be copied or may be a reference to the {@code Tensor}'s internal data buffer. {@code shape}


@@ -200,7 +200,7 @@ struct BoxedKernelWrapper<
// 3. in-place ops take a single non-const Tensor reference
// as their first argument, and return it.
//
-// Note: all signatures matching this pattern are are assumed to be for such ops.
+// Note: all signatures matching this pattern are assumed to be for such ops.
// Because of this, the generated BoxedKernelWrapper specializations simply
// return the in-place argument.
//
@@ -260,7 +260,7 @@ struct BoxedKernelWrapper<
// 4. out of place ops that take a single non-const Tensor reference as their
// final argument, and also return it.
//
-// Note: all signatures matching this pattern are are assumed to be for such ops.
+// Note: all signatures matching this pattern are assumed to be for such ops.
// This assumption permits the generated BoxedKernelWrapper specializations to simply
// return out arguments.
//
@@ -300,7 +300,7 @@ struct BoxedKernelWrapper<
// 5. out of place ops that take multiple non-const Tensor references as their
// final arguments, and return them in a std::tuple.
//
-// Note: all signatures matching this pattern are are assumed to be for such ops.
+// Note: all signatures matching this pattern are assumed to be for such ops.
// This assumption permits the generated BoxedKernelWrapper specializations to simply
// return the out arguments.
//


@@ -2158,7 +2158,7 @@ inline bool IValue::isSameIdentity(const IValue& rhs) const {
// Str) return value equality
// 2. If it is a tensor type, we need to take undefined tensor into account
// 3. Undefined_tensor is None and vice versa should be true
-// 4. If it is a reference type (i.e. is_intrusive_ptr), then is is True when
+// 4. If it is a reference type (i.e. is_intrusive_ptr), then is True when
// the pointed-to object is the same.
// 5. False for all other comparisons.
if (this->isNone() && rhs.isNone()) {


@@ -12,7 +12,7 @@
// It implements various functions with a simple interface
// For example it enables the user to call vsin(float* out, const float* in,
// size) This functions takes a pointer to a contious output array of floats and
-// a constant input array. It will then apply sin to each value in in the input
+// a constant input array. It will then apply sin to each value in the input
// array and write the result into the output array. out and in may point to the
// same memory, i.e. this fully supports in-place operations. These functions
// also implement their own parallelization, so take precautions when calling


@@ -1107,7 +1107,7 @@ Tensor& _linalg_inv_out_helper_cpu(Tensor &result, Tensor& infos_lu, Tensor& inf
return result;
}
-// Computes the inverse matrix of 'input', it is is saved to 'result' in-place
+// Computes the inverse matrix of 'input', it is saved to 'result' in-place
// LAPACK/MAGMA/cuSOLVER error codes are saved in 'infos' tensors, they are not checked here
static Tensor& linalg_inv_out_info(Tensor& result, Tensor& infos_lu, Tensor& infos_getri, const Tensor& input) {
squareCheckInputs(input, "linalg.inv");
@@ -1196,7 +1196,7 @@ static Tensor& linalg_inv_out_info(Tensor& result, Tensor& infos_lu, Tensor& inf
return result;
}
-// Computes the inverse matrix of 'input', it is is saved to 'result' in-place
+// Computes the inverse matrix of 'input', it is saved to 'result' in-place
Tensor& linalg_inv_out(const Tensor &input, Tensor &result) {
auto info_shape = IntArrayRef(input.sizes().cbegin(), input.sizes().cend() - 2); // input.shape[:-2]
auto infos_lu = at::zeros({info_shape}, input.options().dtype(kInt));


@@ -4,7 +4,7 @@ from torch.nn.parallel import DistributedDataParallel as DDP
def basic_ddp_model(self, rank, model, process_group, hook_state, hook):
r"""
A function that creates a ddp_model and hook_state objects.
-The ddp model is is initialized with a single device id and
+The ddp model is initialized with a single device id and
the process group. The ddp_model also registers the communication
hook.
Args:


@@ -56,7 +56,7 @@ Works only with Python3.\n A few examples:
"--input-iter",
type=str,
default=None,
-help="a comma separated list of of Tensor dimensions that includes a start, \
+help="a comma separated list of Tensor dimensions that includes a start, \
stop, and increment that can be constant or a power of 2 \
{start:stop:inc,start:stop:pow2}",
)


@@ -19,7 +19,7 @@ N = maximum sequence length
B = batch size
M = hidden size
-set each element of INPUT to zero if it is is past the end of the
+set each element of INPUT to zero if it is past the end of the
corresponding sequence (i.e. if LENS[j] > i for an index (i,j,k)).
)DOC");


@@ -71,7 +71,7 @@ C10_DEFINE_bool(
namespace caffe2 {
namespace glow {
-// The list in in the form of "0-3,5,6-7" which means, we will black list ops
+// The list in the form of "0-3,5,6-7" which means, we will black list ops
// with net positions in [0,1,2,3,5,6,7]
std::unordered_set<int> ParseNetPositionList(const std::string& str) {
std::unordered_set<int> net_position_list;


@@ -133,7 +133,7 @@ def db_name(epoch, node_name, db_prefix, path_prefix=None):
node_name: A string. The name of the node.
db_prefix: A string. The prefix used to construct full db name.
path_prefix: A string. Optional param used to construct db name or path
-where checkpoint files are are stored.
+where checkpoint files are stored.
Returns:
db_name: A string. The absolute path of full_db_name where checkpoint
files are saved
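Not part of the commit, but as a quick illustration of the helper whose docstring is touched above: a hedged usage sketch based only on the signature visible in the hunk header (`db_name(epoch, node_name, db_prefix, path_prefix=None)`); the `caffe2.python.checkpoint` import path is an assumption.

```python
# Assumed import path; only the signature and docstring are shown in the hunk above.
from caffe2.python.checkpoint import db_name

full_db_name = db_name(
    epoch=5,                          # checkpoint epoch
    node_name="trainer_0",            # name of the node being checkpointed
    db_prefix="/checkpoints/my_job",  # prefix used to construct the full db name
)
# full_db_name is the path under which the epoch-5 checkpoint for trainer_0 is stored.
```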


@@ -269,7 +269,7 @@ class TestGradientCalculation(test_util.TestCase):
in -> out, with UseInput
in -> in
-Since we overwrite in in op#1, but in will be needed by the gradient
+Since we overwrite in op#1, but in will be needed by the gradient
calculation of op#0, the gradient registry should raise an error.
"""
operators = [


@@ -31,7 +31,7 @@ class GRUCell(rnn_cell.RNNCell):
# (reset gate -> output_gate)
# So, much of the logic to calculate the reset gate output and modified
# output gate input is set here, in the graph definition.
-# The remaining logic lives in in gru_unit_op.{h,cc}.
+# The remaining logic lives in gru_unit_op.{h,cc}.
def _apply(
self,
model,


@@ -64,7 +64,7 @@ class TestElementwiseBroadcast(serial.SerializedTestCase):
caffe2_op: A string. Name of the caffe operator to test.
op_function: an actual python operator (e.g. operator.add)
path_prefix: A string. Optional param used to construct db name or path
-where checkpoint files are are stored.
+where checkpoint files are stored.
"""
for X, Y, op_args, X_out, Y_out in self.__generate_test_cases(allow_broadcast_fastpath):


@@ -36,7 +36,7 @@ class FixedLearningRate : public LearningRateFunctor<T> {
};
// Alter: alternatate learning rate with active_period and inactive_period.
-// update for for a duration of active_period and then stop for a duration of
+// update for a duration of active_period and then stop for a duration of
// inactive_period if active_first, and vice versa
template <typename T>
class AlternateLearningRate : public LearningRateFunctor<T> {


@@ -16,7 +16,7 @@ will help you learn more about ``torch.package`` and how to use it.
.. warning::
-This module depends on the ``pickle`` module which is is not secure. Only unpackage data you trust.
+This module depends on the ``pickle`` module which is not secure. Only unpackage data you trust.
It is possible to construct malicious pickle data which will **execute arbitrary code during unpickling**.
Never unpackage data that could have come from an untrusted source, or that could have been tampered with.


@@ -155,7 +155,7 @@ This is the simplest to apply form of quantization where the weights are
quantized ahead of time but the activations are dynamically quantized
during inference. This is used for situations where the model execution time
is dominated by loading weights from memory rather than computing the matrix
-multiplications. This is true for for LSTM and Transformer type models with
+multiplications. This is true for LSTM and Transformer type models with
small batch size.
Diagram::
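Not part of the commit, but for context on the passage above: a minimal sketch of the dynamic-quantization flow it describes, assuming the standard `torch.quantization.quantize_dynamic` API; the toy model and layer choice are illustrative only.

```python
import torch

# Toy model; dynamic quantization targets weight-heavy layers such as Linear and LSTM.
model = torch.nn.Sequential(
    torch.nn.Linear(64, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, 10),
)

# Weights are quantized ahead of time; activations are quantized on the fly at inference.
quantized_model = torch.quantization.quantize_dynamic(
    model, {torch.nn.Linear}, dtype=torch.qint8
)

out = quantized_model(torch.randn(1, 64))
```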


@@ -168,7 +168,7 @@ TEST(ShapeAnalysisTest, DynamicShapesFusion) {
/*
Test guard behaves correctly at runtime and symbolic shapes are computed
-correctly. As we don't have have TE Kernel support for dynamic shapes we're
+correctly. As we don't have TE Kernel support for dynamic shapes we're
going to return all of the computed runtime symbolic dimensions as outputs
of the graph on guard success, and return None on guard failure
*/


@@ -81,7 +81,7 @@ class JitBackendTestCase(JitTestCase):
# Subclasses are expected to set up three variables in their setUp methods:
# module - a regular, Python version of the module being tested
# scripted_module - a scripted version of module
-# lowered_modle - a version of module lowered to a backend
+# lowered_module - a version of module lowered to a backend
def check_function(self, function_name, input):
"""
@@ -498,7 +498,7 @@ class JitBackendTestCaseWithCompiler(JitTestCase):
# Subclasses are expected to set up four variables in their setUp methods:
# module - a regular, Python version of the module being tested
# scripted_module - a scripted version of module
-# lowered_modle - a version of module lowered to a backend
+# lowered_module - a version of module lowered to a backend
# mobile_module - a module with a format that Pytorch Mobile can execute
def check_forward(self, input):


@@ -2179,7 +2179,7 @@ class TestQuantizedOps(TestCase):
torch_type, Y_scale, Y_zero_point, channels_last, \
affine = test_case
num_channels = num_groups * channels_per_group
-# minimum rank for for channels_last
+# minimum rank for channels_last
shapes = (batches, num_channels, elements_per_channel, 1)
# In the FP kernel, sums and sums of squares are calculated in floating point.


@@ -25,7 +25,7 @@ namespace example {
using NoTarget = void;
} // namespace example
-/// A specialization for `Example` that does not have have a target.
+/// A specialization for `Example` that does not have a target.
///
/// This class exists so that code can be written for a templated `Example`
/// type, and work both for labeled and unlabeled datasets.


@@ -112,7 +112,7 @@ void _process_forward_mode_AD(const variable_list &inputs,
const auto num_forward_grads = forward_grads.size();
// contrary to backward mode, we don't allow returning too many gradients
TORCH_CHECK(num_forward_grads == num_outputs, "Function's jvp returned "
-"an invalid number of of forward gradients (expected ", num_outputs,
+"an invalid number of forward gradients (expected ", num_outputs,
" but got ", num_forward_grads, ")");
for (const auto i : c10::irange(num_outputs)) {


@@ -576,7 +576,7 @@ struct CudaGraphFuser {
Value* producer_for_chunk = *it;
size_t producer_index = it - chunk->inputs().begin();
-// all uses of the chunk must be in in this consumer
+// all uses of the chunk must be in this consumer
for (auto s : chunk->outputs()) {
for (auto u : s->uses()) {
if (u.user != consumer)


@@ -66,7 +66,7 @@ When making changes to the operators, the first thing to identify is if it's BC/
### 2. Make changes to the operator and write an upgrader.
1. Make the operator change.
-2. Write an upgrader in `torch/csrc/jit/operator_upgraders/upgraders_entry.cpp` file inside a map `kUpgradersEntryMap`. The softly enforced naming format is `<operator_name>_<operator_overload>_<start>_<end>`. The start and end means the upgrader can be applied to the operator exported during when [the global operator version](https://github.com/pytorch/pytorch/blob/master/caffe2/serialize/versions.h#L82) within the range `[start, end]`. Let's take an operator `linspace` with the overloaded name `out` as an example. The first thing is to check if the upgrader exists in in [upgraders_entry.cpp](https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp).
+2. Write an upgrader in `torch/csrc/jit/operator_upgraders/upgraders_entry.cpp` file inside a map `kUpgradersEntryMap`. The softly enforced naming format is `<operator_name>_<operator_overload>_<start>_<end>`. The start and end means the upgrader can be applied to the operator exported during when [the global operator version](https://github.com/pytorch/pytorch/blob/master/caffe2/serialize/versions.h#L82) within the range `[start, end]`. Let's take an operator `linspace` with the overloaded name `out` as an example. The first thing is to check if the upgrader exists in [upgraders_entry.cpp](https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp).
1. If the upgrader doesn't exist in `upgraders_entry.cpp`, the upgrader name can be `linspace_out_0_{kProducedFileFormatVersion}`, where [`kProducedFileFormatVersion`](https://github.com/pytorch/pytorch/blob/master/caffe2/serialize/versions.h#L82) can be found in [versions.h](https://github.com/pytorch/pytorch/blob/master/caffe2/serialize/versions.h).
2. If the upgrader exist in `upgraders_entry.cpp`, for example `linspace_out_0_7` (means `linspace.out` operator is changed when operator version is bumped from 7 to 8),
1. If it's possible to write an upgrader valid for `linspace` before versioning bumping to 8, after versioning bumping to 8, write an upgrader `linspace_out_0_{kProducedFileFormatVersion}`


@@ -733,7 +733,7 @@ struct GraphFuser {
Value* producer_for_chunk = *it;
size_t producer_index = it - chunk->inputs().begin();
-// all uses of the chunk must be in in this consumer
+// all uses of the chunk must be in this consumer
for (auto s : chunk->outputs()) {
for (auto u : s->uses()) {
if (u.user != consumer)


@@ -257,7 +257,7 @@ void initMonitorBindings(PyObject* module) {
m,
"data_value_t",
R"DOC(
-data_value_t is one of of ``str``, ``float``, ``int``, ``bool``.
+data_value_t is one of ``str``, ``float``, ``int``, ``bool``.
)DOC");
py::implicitly_convertible<std::string, data_value_t>();
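Not part of the commit: a sketch of how `data_value_t` surfaces on the Python side, assuming the `torch.monitor` bindings (`Event`, `log_event`) available around this release; treat the exact constructor arguments as an assumption.

```python
from datetime import datetime, timezone

# Assumed binding names; the point is that every value in `data` is one of
# str, float, int, or bool (i.e. data_value_t).
from torch.monitor import Event, log_event

evt = Event(
    name="train_loop.stats",
    timestamp=datetime.now(timezone.utc),
    data={"phase": "train", "loss": 0.42, "step": 100, "converged": False},
)
log_event(evt)
```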


@@ -314,7 +314,7 @@ class class_ : public ::torch::detail::class_base {
def("__getstate__", std::forward<GetStateFn>(get_state));
// __setstate__ needs to be registered with some custom handling:
-// We need to wrap the invocation of of the user-provided function
+// We need to wrap the invocation of the user-provided function
// such that we take the return value (i.e. c10::intrusive_ptr<CurrClass>)
// and assign it to the `capsule` attribute.
using SetStateTraits =