diff --git a/android/README.md b/android/README.md
index 6b8000c13fcc..f0c74750522d 100644
--- a/android/README.md
+++ b/android/README.md
@@ -2,7 +2,7 @@
 
 ## Demo applications and tutorials
 
-Please refer to [pytorch-labs/executorch-examples](https://github.com/pytorch-labs/executorch-examples/tree/main/dl3/android/DeepLabV3Demo) for the Android demo app based on [ExecuTorch](https://github.com/pytorch/executorch).
+Please refer to [meta-pytorch/executorch-examples](https://github.com/meta-pytorch/executorch-examples/tree/main/dl3/android/DeepLabV3Demo) for the Android demo app based on [ExecuTorch](https://github.com/pytorch/executorch).
 
 Please join our [Discord](https://discord.com/channels/1334270993966825602/1349854760299270284) for any questions.
 
diff --git a/aten/src/ATen/native/cuda/int4mm.cu b/aten/src/ATen/native/cuda/int4mm.cu
index 272eb9b9c564..5444bb57eba7 100644
--- a/aten/src/ATen/native/cuda/int4mm.cu
+++ b/aten/src/ATen/native/cuda/int4mm.cu
@@ -1304,7 +1304,7 @@ at::Tensor _convert_weight_to_int4pack_cuda(
   constexpr int32_t kKTileSize = 16;
 
   // GPT-FAST assumes nTileSize of 8 for quantized weight tensor.
-  // See https://github.com/pytorch-labs/gpt-fast/blob/091515ab5b06f91c0d6a3b92f9c27463f738cc9b/quantize.py#L510
+  // See https://github.com/meta-pytorch/gpt-fast/blob/091515ab5b06f91c0d6a3b92f9c27463f738cc9b/quantize.py#L510
   // Torch dynamo also requires the torch ops has the same output shape for each device.
   // See https://github.com/pytorch/pytorch/blob/ec284d3a74ec1863685febd53687d491fd99a161/torch/_meta_registrations.py#L3263
   constexpr int32_t kNTileSizeTensor = 8;
diff --git a/torch/testing/_internal/common_quantization.py b/torch/testing/_internal/common_quantization.py
index 211b282c4fc4..f8671379950e 100644
--- a/torch/testing/_internal/common_quantization.py
+++ b/torch/testing/_internal/common_quantization.py
@@ -611,7 +611,7 @@ def _group_quantize_tensor_symmetric(w, n_bit=4, groupsize=32):
 
 
 def _dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):
-    # source: https://github.com/pytorch-labs/gpt-fast/blob/main/quantize.py
+    # source: https://github.com/meta-pytorch/gpt-fast/blob/main/quantize.py
     # default setup for affine quantization of activations
     x_dtype = x.dtype
     x = x.float()
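
Note on the last hunk: `_dynamically_quantize_per_channel` derives per-channel scales and zero points from the activation tensor itself at runtime, rather than from offline calibration. The sketch below illustrates that general dynamic affine-quantization recipe; it is a minimal illustration, not the gpt-fast or common_quantization.py implementation. The function name, the choice of dim 0 as the channel axis, and the epsilon guard are all assumptions.

import torch

def dynamic_per_channel_quant_sketch(x, quant_min, quant_max, target_dtype):
    # Illustrative sketch only; not the exact gpt-fast implementation.
    # Assumes channels are rows (dim 0), so stats reduce over dim 1.
    x_dtype = x.dtype
    x = x.float()

    # Per-channel min/max, with each range forced to straddle zero so that
    # zero is exactly representable (standard affine-quantization setup).
    min_val = torch.clamp(x.amin(dim=1), max=0.0)
    max_val = torch.clamp(x.amax(dim=1), min=0.0)

    # scale maps the float range onto [quant_min, quant_max]; the epsilon
    # guard for all-zero channels is an assumption.
    scale = (max_val - min_val) / float(quant_max - quant_min)
    scale = torch.clamp(scale, min=1e-9)
    zero_point = torch.clamp(
        torch.round(quant_min - min_val / scale), quant_min, quant_max
    )

    # Quantize: x_q = clamp(round(x / scale) + zero_point, quant_min, quant_max).
    x_q = torch.clamp(
        torch.round(x / scale.unsqueeze(1)) + zero_point.unsqueeze(1),
        quant_min,
        quant_max,
    ).to(target_dtype)
    return x_q, scale.to(x_dtype), zero_point

A typical call under these assumptions would be x_q, s, zp = dynamic_per_channel_quant_sketch(act, -128, 127, torch.int8) for int8 activations.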