mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Summary: * adds TORCH_API and AT_CUDA_API in places * refactor code generation Python logic to separate caffe2/torch outputs * fix hip and asan * remove profiler_cuda from hip * fix gcc warnings for enums * Fix PythonOp::Kind Pull Request resolved: https://github.com/pytorch/pytorch/pull/19554 Differential Revision: D15082727 Pulled By: kostmo fbshipit-source-id: 83a8a99717f025ab44b29608848928d76b3147a4
33 lines
999 B
C++
33 lines
999 B
C++
#pragma once

#include <ATen/ATen.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>

#include <cstddef>
#include <vector>

namespace torch { namespace cuda {

/// A per-device list of tensor lists: outer index is the device slot,
/// inner vector holds the tensors placed on that device.
using tensor_list2d = std::vector<std::vector<at::Tensor>>;

/// Replicates `tensor` onto each device in `devices`.
/// Returns one tensor per requested device.
TORCH_API std::vector<at::Tensor> broadcast(const at::Tensor& tensor, at::IntArrayRef devices);

/// Broadcasts `tensors` to `devices`, coalescing them into buffers of at
/// most `buffer_size` bytes to reduce the number of transfers.
/// Returns a tensor_list2d: one list of broadcast tensors per device.
/// NOTE(review): coalescing granularity is implementation-defined by the
/// .cpp — confirm buffer_size semantics there.
TORCH_API tensor_list2d broadcast_coalesced(at::TensorList tensors, at::IntArrayRef devices,
                                            size_t buffer_size);

/// Splits `tensor` along dimension `dim` and distributes the chunks across
/// `devices`.
///
/// @param tensor      source tensor to split.
/// @param devices     target device indices, one chunk per device.
/// @param chunk_sizes optional explicit per-device chunk sizes along `dim`;
///                    when nullopt the split is chosen by the implementation
///                    (presumably near-equal chunks — verify in comm.cpp).
/// @param dim         dimension to split along (default 0).
/// @param streams     optional per-device CUDA streams to perform the
///                    copies on; nullopt uses the current streams.
TORCH_API std::vector<at::Tensor> scatter(
    const at::Tensor& tensor,
    at::IntArrayRef devices,
    const c10::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
    int64_t dim = 0,
    const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>& streams =
        c10::nullopt);

/// Concatenates `tensors` (gathered from multiple devices) along `dim`
/// onto a single destination.
///
/// @param tensors           input tensors, one per source device.
/// @param dim               dimension to concatenate along.
/// @param destination_index target device index; nullopt selects the
///                          implementation's default destination
///                          (presumably the CPU — confirm in comm.cpp).
TORCH_API at::Tensor gather(
    at::TensorList tensors,
    int64_t dim,
    c10::optional<int32_t> destination_index);

}} // namespace torch::cuda
|