diff --git a/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h b/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h
index a864dbf45bf4..436ed3f01f2f 100644
--- a/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h
+++ b/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h
@@ -154,6 +154,10 @@ class MiniArrayRef final {
 
 using MiniIntArrayRef = MiniArrayRef<int64_t>;
 
+static_assert(
+    sizeof(MiniIntArrayRef) == sizeof(void*) + sizeof(size_t),
+    "changing the size of MiniArrayRef breaks ABI compatibility!");
+
 inline bool is_contiguous_strides_for_shape(
     int64_t ndim,
     const int64_t* strides_ptr,
@@ -189,8 +193,7 @@ class ArrayRefTensor {
         sizes_(sizes),
         strides_(strides),
         device_type_(device_type),
-        device_idx_(device_idx),
-        numel_(arr.size()) {
+        device_idx_(device_idx) {
     assert(sizes.size() == strides.size());
     assert(is_contiguous_strides_for_shape(
         sizes.size(), strides.data(), sizes.data()));
@@ -242,7 +245,7 @@ class ArrayRefTensor {
   }
 
   auto numel() const {
-    return numel_;
+    return arrayRef_.size();
   }
 
   void set_arrayref(MiniArrayRef<T> new_arrayref) {
@@ -257,9 +260,17 @@ class ArrayRefTensor {
   MiniArrayRef<const int64_t> strides_;
   int32_t device_type_ = 0;
   int32_t device_idx_ = 0;
-  int32_t numel_ = 0;
+  // We continue to zero-initialize this field in case we repurpose
+  // the space later; having predictable contents can only help.
+  int32_t unusedDoNotRemoveForABICompatibility_ = 0;
 };
 
+static_assert(
+    sizeof(ArrayRefTensor<int>) ==
+        3 * sizeof(MiniIntArrayRef) + 3 * sizeof(int32_t) +
+            (alignof(ArrayRefTensor<int>) > 4 ? sizeof(int32_t) : 0),
+    "changing the size of ArrayRefTensor breaks ABI compatibility!");
+
 inline AtenTensorHandle reinterpret_tensor_wrapper(
     AtenTensorHandle self,
     int64_t ndim,
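
Note (reviewer sketch, not part of the patch): the "(alignof(ArrayRefTensor<int>) > 4 ? sizeof(int32_t) : 0)" term in the second static_assert accounts for tail padding. Assuming a typical LP64 target, the three MiniArrayRef members occupy 3 * 16 = 48 bytes and the three int32_t fields another 12, for 60 bytes of data; since the struct's alignment is 8 (inherited from its pointer and size_t members), the compiler rounds sizeof up to 64, i.e. one extra int32_t of padding. On a 32-bit target the alignment is 4 and no padding is added. The self-contained sketch below reproduces that arithmetic; MiniArrayRefSketch and ArrayRefTensorSketch are simplified hypothetical stand-ins, not the real classes from arrayref_tensor.h.

// Reviewer sketch with hypothetical stand-in types; byte counts in the
// comments assume an LP64 platform.
#include <cstddef>
#include <cstdint>

template <typename T>
struct MiniArrayRefSketch {
  T* data_ = nullptr;       // sizeof(void*): 8 bytes on LP64
  std::size_t length_ = 0;  // sizeof(size_t): 8 bytes on LP64
};

template <typename T>
struct ArrayRefTensorSketch {
  MiniArrayRefSketch<T> arrayRef_;                 // 16 bytes on LP64
  MiniArrayRefSketch<const std::int64_t> sizes_;   // 16
  MiniArrayRefSketch<const std::int64_t> strides_; // 16
  std::int32_t device_type_ = 0;                   // 4
  std::int32_t device_idx_ = 0;                    // 4
  std::int32_t unused_ = 0;                        // 4 -> 60 bytes of fields,
};                                                 // sizeof rounded up to 64
                                                   // because alignof == 8

// Mirrors the formula in the patch: when the struct's alignment exceeds 4,
// one int32_t of tail padding is added (60 -> 64 on LP64); on a 32-bit
// target (alignof == 4, fields total 36) the padding term is zero.
static_assert(
    sizeof(ArrayRefTensorSketch<int>) ==
        3 * sizeof(MiniArrayRefSketch<std::int64_t>) +
            3 * sizeof(std::int32_t) +
            (alignof(ArrayRefTensorSketch<int>) > 4 ? sizeof(std::int32_t)
                                                    : 0),
    "sketch layout should match the formula in the patch");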