Revert "Fix typos."

This reverts commit 4622b3395276b37e10141fab43ffea33941ca0c2.
Gregory Chanan
2017-08-10 10:17:30 -07:00
committed by gchanan
parent 1199e3d496
commit 50c208a50b
36 changed files with 44 additions and 44 deletions

@@ -542,7 +542,7 @@ mark_as_advanced(
# Makefile and similar generators don't define CMAKE_CONFIGURATION_TYPES, so we
# need to add another entry for the CMAKE_BUILD_TYPE. We also need to add the
# standerd set of 4 build types (Debug, MinSizeRel, Release, and RelWithDebInfo)
-# for completeness. We need run this loop in order to accommodate the addition
+# for completeness. We need run this loop in order to accomodate the addition
# of extra configuration types. Duplicate entries will be removed by
# REMOVE_DUPLICATES.
set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)

@@ -168,7 +168,7 @@ if(CUDA_VERSION VERSION_LESS "3.0")
# CMake policy 0007 NEW states that empty list elements are not
# ignored. I'm just setting it to avoid the warning that's printed.
cmake_policy(SET CMP0007 NEW)
-# Note that this will remove all occurrences of -G.
+# Note that this will remove all occurances of -G.
list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G")
cmake_policy(POP)
endif()

@@ -2545,7 +2545,7 @@ class TestNN(NNTestCase):
self.assertEqual(input_cpu.grad, input_cuda.grad)
self.assertEqual(grid_cpu.grad, grid_cuda.grad)
-# check that zero-dimensional input strides don't error out
+# check that zero-dimensional input strides dont error out
base_input = torch.randn(C, IH, IW)
input_cpu = Variable(base_input.expand(input_cuda.size()), requires_grad=True)
grid_cpu = Variable(torch.randn(N, H, W, 2), requires_grad=True)

@@ -4009,7 +4009,7 @@ class TestTorch(TestCase):
expected = torch.arange(1, 126).view(5, 5, 5)[:, 1]
self.assertEqual(torch.from_numpy(x), expected)
-# check zero dimensional
+# check zero dimentional
x = np.zeros((0, 2))
self.assertRaises(RuntimeError, lambda: torch.from_numpy(x))

@@ -26,7 +26,7 @@ class Backend(object):
self.loading_lock = threading.Lock()
def load(self):
-# This looks a little weird, but it's necessary for thread safe loading.
+# This looks a little weird, but it's neccesary for thread safe loading.
# Loading the backend can take some time, so multiple threads can enter
# the if clause. We have to ensure that only the first one to acquire
# the lock will actually load the backend, and that the rest won't
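
The comment above describes classic double-checked locking. A minimal standalone sketch of that pattern (the LazyBackend name and the loader callable are illustrative placeholders, not the actual torch backend code):

    import threading

    class LazyBackend(object):
        def __init__(self, loader):
            self.loader = loader                  # callable doing the slow import/build
            self.backend = None
            self.loading_lock = threading.Lock()

        def load(self):
            if self.backend is None:              # fast path, no lock taken
                with self.loading_lock:
                    if self.backend is None:      # re-check: another thread may have loaded it
                        self.backend = self.loader()
            return self.backend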

@@ -1817,7 +1817,7 @@ along a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
A tuple of `(values, indices)` is returned, where the `indices` is the indices
-of the kth-smallest element in the original `input` Tensor in dimension `dim`.
+of the kth-smallest element in the original `input` Tensor in dimention `dim`.
If :attr:`keepdim` is true, both the :attr:`values` and :attr:`indices` Tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where

@@ -48,7 +48,7 @@ def backward(variables, grad_variables=None, retain_graph=None, create_graph=Non
The graph is differentiated using the chain rule. If any of ``variables``
are non-scalar (i.e. their data has more than one element) and require
-gradient, the function additionally requires specifying ``grad_variables``.
+gradient, the function additionaly requires specifying ``grad_variables``.
It should be a sequence of matching length, that contains gradient of
the differentiated function w.r.t. corresponding variables (``None`` is an
acceptable value for all variables that don't need gradient tensors).
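
A minimal usage sketch of the rule this docstring describes, using the 0.2-era Variable API (shapes and values here are illustrative assumptions):

    import torch
    from torch.autograd import Variable

    x = Variable(torch.randn(3), requires_grad=True)
    y = x * 2                                      # non-scalar output
    torch.autograd.backward([y], [torch.ones(3)])  # grad w.r.t. y must be supplied
    print(x.grad)                                  # filled with 2s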

@@ -142,7 +142,7 @@ class _SelectionFunction(Function):
has_all_reduce = True
# additional_args is prepended before dim when calling the tensor
# function. It's a no-op for subclasses other than kthvalue.
-# kthvalue not only requires us to pass a dim, but also precede it with k.
+# kthvalue not only requires us to pass a dim, but also preceed it with k.
@classmethod
def forward(cls, ctx, input, dim=None, keepdim=None, additional_args=tuple()):
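
For illustration, a hedged sketch of the calling convention that comment refers to: at the tensor level, kthvalue takes k before dim (the values below are arbitrary):

    import torch

    x = torch.randn(4, 6)
    values, indices = x.kthvalue(2, 1)   # k=2 comes first, then dim=1
    # values holds the 2nd-smallest entry of each row, indices its column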

@@ -130,7 +130,7 @@ class Variable(_C._VariableBase):
The graph is differentiated using the chain rule. If the variable is
non-scalar (i.e. its data has more than one element) and requires
-gradient, the function additionally requires specifying ``gradient``.
+gradient, the function additionaly requires specifying ``gradient``.
It should be a tensor of matching type and location, that contains
the gradient of the differentiated function w.r.t. ``self``.

@@ -1,7 +1,7 @@
#include "torch/csrc/cuda/THCP.h"
// Declare/Define the expansion functions that have THCState. Note that we
-// still need to define the CPU-type versions because the copy functions that
+// still need to define the CPU-type verisons because the copy functions that
// copy from GPU to CPU type have a THCState.
#define CUDA_EXPAND 1

@@ -1384,7 +1384,7 @@ static PyObject * THPTensor_(getValue)(THPTensor *self, PyObject *index)
return THPUtils_(newReal)(THStorage_(get)(LIBRARY_STATE sresult, storage_offset));
}
}
-THPUtils_setError("An unknown error has occurred when indexing a tensor "
+THPUtils_setError("An unknown error has occured when indexing a tensor "
"in THPTensor_(getValue). Please report this in a github issue at: "
"https://github.com/pytorch/pytorch");
return NULL;
@@ -1482,7 +1482,7 @@ static int THPTensor_(setValue)(THPTensor *self, PyObject *index, PyObject *valu
}
return 0;
}
-THPUtils_setError("An unknown error has occurred when indexing a tensor "
+THPUtils_setError("An unknown error has occured when indexing a tensor "
"in THPTensor_(setValue). Please report this in a github issue at: "
"https://github.com/pytorch/pytorch");
return -1;

@@ -98,7 +98,7 @@ PyObject * THPTensor_(toNumpy)(THPTensor *self, PyObject *args) {
strides.get(), self->cdata->storage->data + self->cdata->storageOffset,
0, NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE | NPY_ARRAY_C_CONTIGUOUS, nullptr));
if (!array) {
-THPUtils_setError("an error occurred during conversion to numpy array");
+THPUtils_setError("an error occured during conversion to numpy array");
return NULL;
}
@@ -108,7 +108,7 @@ PyObject * THPTensor_(toNumpy)(THPTensor *self, PyObject *args) {
Py_INCREF(self);
if (PyArray_SetBaseObject((PyArrayObject*)(array.get()), (PyObject*)self) == -1) {
Py_DECREF(self);
-THPUtils_setError("an error occurred during conversion to numpy array");
+THPUtils_setError("an error occured during conversion to numpy array");
return NULL;
}

@@ -138,7 +138,7 @@ def scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None):
dim (int, optional): A dimension along which to chunk the tensor.
Returns:
-A tuple containing chunks of the ``tensor``, spread across given
+A tuple containing chunks of the ``tensor``, spread accross given
``devices``.
"""
if chunk_sizes is None:

@@ -1,6 +1,6 @@
"""
torch.distributed provides an MPI-like interface for exchanging tensor
-data across multi-machine networks. It supports a few different backends
+data accross multi-machine networks. It supports a few different backends
and initialization methods.
"""
import torch
@@ -105,7 +105,7 @@ class _DistributedRequest(object):
def get_rank():
"""Returns the rank of current process.
-Rank is a unique identifier assigned to each process within a distributed
+Rank is a unique identifier assigned to each process withing a distributed
group. They are always consecutive integers ranging from 0 to ``world_size``.
"""
assert torch.distributed._initialized
@@ -192,7 +192,7 @@ def broadcast(tensor, src, group=group.WORLD):
def all_reduce(tensor, op=reduce_op.SUM, group=group.WORLD):
-"""Reduces the tensor data across all machines in such a way that all get
+"""Reduces the tensor data accross all machines in such a way that all get
the final result.
After the call ``tensor`` is going to be bitwise identical in all processes.
@@ -210,7 +210,7 @@ def all_reduce(tensor, op=reduce_op.SUM, group=group.WORLD):
def reduce(tensor, dst, op=reduce_op.SUM, group=group.WORLD):
-"""Reduces the tensor data across all machines.
+"""Reduces the tensor data accross all machines.
Only the process with rank ``dst`` is going to receive the final result.
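
A hedged usage sketch of the collectives documented above; the TCP backend, address, rank, and world size below are placeholder assumptions for a single-process run:

    import torch
    import torch.distributed as dist

    dist.init_process_group('tcp', init_method='tcp://127.0.0.1:23456',
                            rank=0, world_size=1)
    t = torch.ones(4) * (dist.get_rank() + 1)
    dist.all_reduce(t, op=dist.reduce_op.SUM)  # every rank ends up with the element-wise sum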

@@ -111,7 +111,7 @@ class LookupTable(Module):
if self.maxNorm is None:
return
-# copy input into _input, so _input is continuous.
+# copy input into _input, so _input is continous.
# The copied _input will be modified in the C code.
self._input.resize_(input.size()).copy_(input)
row_idx = self._input

@@ -21,7 +21,7 @@ class SpatialDropout(Module):
self.noise.bernoulli_(1 - self.p)
# We expand the random dropouts to the entire feature map because the
-# features are likely correlated across the map and so the dropout
+# features are likely correlated accross the map and so the dropout
# should also be correlated.
self.output.mul_(self.noise.expand_as(input))
else:

@@ -30,8 +30,8 @@ class Sum(Module):
def updateGradInput(self, input, gradOutput):
dimension = self._getPositiveDimension(input)
-# zero-strides don't work with MKL/BLAS, so
-# don't set self.gradInput to zero-stride tensor.
+# zero-strides dont work with MKL/BLAS, so
+# dont set self.gradInput to zero-stride tensor.
# Instead, do a deepcopy.
size = list(input.size())
size[dimension] = 1

@@ -19,7 +19,7 @@ class VolumetricDropout(Module):
self.noise.bernoulli_(1 - self.p)
# We expand the random dropouts to the entire feature map because the
-# features are likely correlated across the map and so the dropout
+# features are likely correlated accross the map and so the dropout
# should also be correlated.
self.output.mul_(self.noise.expand_as(input))
else:

@@ -12,7 +12,7 @@ def asgd(opfunc, x, config, state=None):
eta_t = eta0 / (1 + lambda eta0 t) ^ 0.75
mu_t = 1/max(1,t-t0)
-implements ASGD algorithm as in L.Bottou's sgd-2.0
+implements ASGD algoritm as in L.Bottou's sgd-2.0
ARGS:
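
As a worked sketch of the schedule quoted above (the default values for eta0, lambda, and t0 below are illustrative assumptions):

    def asgd_schedule(t, eta0=1e-4, lmbda=1e-4, t0=1e6):
        eta_t = eta0 / (1 + lmbda * eta0 * t) ** 0.75  # step-size decay
        mu_t = 1.0 / max(1, t - t0)                    # averaging coefficient
        return eta_t, mu_t

    print(asgd_schedule(0))        # (0.0001, 1.0): no decay, no averaging yet
    print(asgd_schedule(5000000))  # smaller eta, averaging active once t > t0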

@@ -38,7 +38,7 @@ public:
#undef DEFINE_IMPLICIT_CTOR
-// return a new scalar that is guaranteed to be not backed by a tensor.
+// return a new scalar that is guarenteed to be not backed by a tensor.
Scalar local() {
if (Tag::HAS_t != tag) {
return *this;

@@ -154,7 +154,7 @@ IF(C_SSE3_FOUND)
MESSAGE(STATUS "SSE3 Found")
SET(CMAKE_C_FLAGS "${C_SSE3_FLAGS} -DUSE_SSE3 ${CMAKE_C_FLAGS}")
ENDIF(C_SSE3_FOUND)
-# we don't set -mavx and -mavx2 flags globally, but only for specific files
+# we dont set -mavx and -mavx2 flags globally, but only for specific files
# however, we want to enable the AVX codepaths, so we still need to
# add USE_AVX and USE_AVX2 macro defines
IF(C_AVX_FOUND)

@@ -1345,7 +1345,7 @@ static void THFloatVector_divs_VSX(float *y, const float*x, const float c, const
// TODO
//
//
-// Finished running all tests. All tests PASSED.
+// Finished runnning all tests. All tests PASSED.
//
//------------------------------------------------
#ifdef RUN_VSX_TESTS
@@ -2509,7 +2509,7 @@ int main()
-printf("Finished running all tests. All tests PASSED.\n");
+printf("Finished runnning all tests. All tests PASSED.\n");
return 0;
}

@@ -22,7 +22,7 @@
THC_API THAllocator THCCachingHostAllocator;
// Records an event in the specified stream. The allocation 'ptr' will not be
-// re-used until the event has occurred.
+// re-used until the event has occured.
THC_API cudaError_t THCCachingHostAllocator_recordEvent(void *ptr, THCStream *stream);
// Releases cached pinned memory allocations via cudaHostFree

@@ -663,7 +663,7 @@ THC_API void THCudaTensor_conv2DRevgerm(THCState *state, THCudaTensor *output, f
float *output_data = THCudaTensor_data(state, output);
// kernel is called multiple times
-// (the arbitrary split below is just here to make sure we don't go over 256 threads)
+// (the arbitrary split below is just here to make sure we dont go over 256 threads)
for (int sl=0; sl<nbatch; sl+=6) {
// auto compute nb of blocks and threads
dim3 blocks(nKernelPlane, nInputPlane);

@@ -245,7 +245,7 @@ __global__ void computeMode(
// Finally, we need to find the "an" index of the mode in the input Tensor. The API does
// not constrain which index we pick, so it can be any of the indices that contain the mode.
// We will do a reduction to find the index. We go back to using the (index, flag) buffer
-// arrangement. First, we mark indices that are equal to the mode, i.e B[i] = true if
+// arrangment. First, we mark indices that are equal to the mode, i.e B[i] = true if
// input[i] == mode, and initialize C[i] to be the index
//
// Again we reduce 2 elements in the thread's registers prior to the block-wide reduction

@@ -105,7 +105,7 @@ __global__ void cunn_LookupTable_accGradParametersKernel(
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
-// If the preceding input has the same as this input, then the warp
+// If the preceeding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//

@@ -156,7 +156,7 @@ void THNN_(SparseLinear_accGradParameters)(
THCTensor_(select)(state, sel, input, 1, 0); // rowInds
THCTensor_(select)(state, cols, input, 1, 1); // colInds
THCTensor_(cadd)(state, buf, sel, batchnum, cols); // colInds * buatchdim + rowInds
-THCTensor_(sort)(state, buf, inds, buf, 0, 0); // Indices are now in ind
+THCTensor_(sort)(state, buf, inds, buf, 0, 0); // Indicies are now in ind
THCTensor_(indexSelect)(state, buf, input, 0, inds);
THCTensor_(resize1d)(state, values, nnz);

@@ -70,7 +70,7 @@ void Store::StoreDeamon::deamon() {
try {
query(rank);
} catch (...) {
-// There was an error when processing query. Probably an exception occurred in
+// There was an error when processing query. Probably an exception occured in
// recv/send what would indicate that socket on the other side has been closed.
// If the closing was due to normal exit, then the store should exit too.
// Otherwise, if it was different exception, other processes will get
@@ -190,7 +190,7 @@ void Store::wait(const std::vector<std::string>& keys) {
for (std::size_t i = 0; i < nkeys; i++) {
send_string(_socket, keys[i], (i != (nkeys - 1)));
}
-// after sending the query, wait for a 'stop_waiting' response
+// after sending the query, wait for a 'stop_waiting' reponse
QueryType qr;
recv_bytes<QueryType>(_socket, &qr, 1);
if (qr != QueryType::STOP_WAITING)

@@ -177,7 +177,7 @@ std::vector<MulticastMessage> getMessages(struct sockaddr* addr, rank_type world
if (recv_message_str == packed_msg) continue; // ignore multicast loopback
-// We should ignore messages coming from different group
+// We should ignore messages comming from different group
auto recv_msg = MulticastMessage(recv_message_str);
if (recv_msg.group_name != group_name) {
continue;

@@ -23,7 +23,7 @@ overview of these domains and how they interact.
## Overview
-Gloo algorithms are collective algorithms, meaning they can run in
+Gloo algorithms are collective algoritms, meaning they can run in
parallel across two or more processes/machines. To be able to execute
across multiple machines, they first need to find each other. We call
this _rendezvous_ and it is the first thing to address when

@@ -58,7 +58,7 @@ rendezvous to a particular namespace.
There are many more key/value stores that can be used for rendezvous
(e.g. [etcd](https://coreos.com/etcd) or [ZooKeeper](https://zookeeper.apache.org/)).
-As long as a C or C++ interface for your store of choice is available,
+As long as a C or C++ interface for your store of choice is availabe,
is relatively easy to hook it up to the Gloo rendezvous process.
See the `gloo::rendezvous::Store` abstract base class for the interface to implement.

@@ -58,7 +58,7 @@ protected:
bytes_received += step_received;
buffer += step_received;
} else if (pfd.revents & (POLLERR | POLLHUP)) {
-throw std::runtime_error("An error occurred while waiting for the data");
+throw std::runtime_error("An error occured while waiting for the data");
} else {
throw std::runtime_error("Shared memory manager connection has timed out");
}

@@ -46,7 +46,7 @@ typedef enum { ncclSuccess = 0,
/* Generates a unique Id with each call. Used to generate commId for
* ncclCommInitAll. uniqueId will be created in such a way that it is
-* guaranteed to be unique across the host. */
+* guaranteed to be unique accross the host. */
ncclResult_t ncclGetUniqueId(ncclUniqueId* uniqueId);
ncclResult_t pncclGetUniqueId(ncclUniqueId* uniqueId);

@@ -23,7 +23,7 @@
* PostFlag. The primitive routines wait for all WaitFlag args to attain
* at least a value of SUBSTEPS*(step-1)+substep+1 (i.e. completion of
* corresponding substep by previous step) before executing the transfer.
-* After each substep is transferred, all PostFlag arguments get updated to
+* After each substep is transfered, all PostFlag arguments get updated to
* the value SUBSTEPS*step+substep+1.
*/

@@ -10,7 +10,7 @@ import torch.utils.hooks as hooks
def _addindent(s_, numSpaces):
s = s_.split('\n')
-# don't do anything for single-line stuff
+# dont do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)

@@ -6,7 +6,7 @@ from ._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0):
"""
Slices variables into approximately equal chunks and
-distributes them across given GPUs. Duplicates
+distributes them accross given GPUs. Duplicates
references to objects that are not variables. Does not
support Tensors.
"""