[BE][7/16] fix typos in torch/ (torch/csrc/) (#156317)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/156317
Approved by: https://github.com/albanD
ghstack dependencies: #156313, #156314, #156315, #156316
Committed by: PyTorch MergeBot
Parent: b210cf1ea5
Commit: ee72815f11
@@ -1177,7 +1177,6 @@ exclude_patterns = [
 'torch/distributed/tensor/**',
 'torch/[j-o]*/**',
 'torch/utils/**',
 'torch/csrc/**',
 'torch/csrc/jit/**',
 'torch/csrc/jit/[a-o]*/**',
 'torch/csrc/[a-i]*/**',
@@ -12,8 +12,12 @@ fro
 froms
 hsa
 nd
 nin
 nout
 NowNs
 optins
 OT
 overrideable
 ptd
 rebuild
 rebuilt
@@ -329,7 +329,7 @@ struct PyWarningHandler {
 /** Call if an exception has been thrown
- * Necessary to determine if it is safe to throw from the desctructor since
+ * Necessary to determine if it is safe to throw from the destructor since
 * std::uncaught_exception is buggy on some platforms and generally
 * unreliable across dynamic library calls.
 */
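The comment fixed above exists because the handler may need to throw from its destructor, and `std::uncaught_exception` is too unreliable to decide whether that is safe. A minimal standalone sketch of that pattern, with invented names rather than the actual torch/csrc class:

```cpp
#include <stdexcept>

// Hypothetical RAII buffer illustrating the idea: the surrounding code calls
// set_in_exception() when an exception has been thrown, so the destructor can
// decide whether rethrowing a deferred error is safe without consulting
// std::uncaught_exception().
class ScopedWarningBuffer {
 public:
  // Call if an exception has been thrown while this object is alive.
  void set_in_exception() noexcept {
    in_exception_ = true;
  }

  // Record that a warning should be escalated to an error later.
  void record_error() noexcept {
    has_error_ = true;
  }

  ~ScopedWarningBuffer() noexcept(false) {
    // Only throw when no other exception is propagating; throwing during
    // stack unwinding would call std::terminate().
    if (has_error_ && !in_exception_) {
      throw std::runtime_error("deferred warning escalated to an error");
    }
  }

 private:
  bool in_exception_ = false;
  bool has_error_ = false;
};
```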
@@ -101,7 +101,7 @@ PyObject* THPStorage_Wrap(c10::Storage storage) {
 // If the StorageImpl has a PyObject that is managed by a different
 // interpreter than the current one, create a new StorageImpl that points to
 // the same data and then create the Python storage from that.
-// NOTE: This is only supposed to happen in MultiPy
+// NOTE: This is only supposed to happen in MultiPy // codespell:ignore
 if (pyobj_slot->has_pyobj_nonhermetic() &&
 !pyobj_slot->check_interpreter(getPyInterpreter())) {
 return THPStorage_NewWithStorage(
@@ -20,7 +20,7 @@ using size_t = std::size_t;
 class TORCH_API hash_t : public c10::uint128 {
 public:
-// Swich from typedef hash_t = uint128 to provide explicit casters
+// Switch from typedef hash_t = uint128 to provide explicit casters
 hash_t(int8_t val) : uint128(static_cast<uint32_t>(val)) {}
 hash_t(int16_t val) : uint128(static_cast<uint32_t>(val)) {}
 hash_t(int32_t val) : uint128(static_cast<uint32_t>(val)) {}
@@ -69,7 +69,7 @@ hash_t Hash(const T& value) {
 // breaks falling through to the templated arithmetic types above
 hash_t TORCH_API Hash(const std::vector<bool>& value);
-// Specialiazed implementations for proprietary types
+// Specialized implementations for proprietary types
 static inline hash_t Hash(const c10::ScalarType& value) {
 return DataHash(&value, sizeof(value));
 }
@@ -1042,7 +1042,7 @@ std::vector<BackendDataPtr> LazyGraphExecutor::GatherTensorsData(
 void LazyGraphExecutor::TensorCollectionBarrier(SyncTensorCollection* coll) {
 if (coll) {
 static const std::string invalid_device(
-"Unknown0"); /* Temp solution to idetify unassigned devices */
+"Unknown0"); /* Temp solution to identify unassigned devices */
 if (coll->device.toString() == invalid_device || !coll->unlocker.empty()) {
 return;
 }
@@ -232,7 +232,7 @@ TORCH_API std::string CreateMetricReport(
 const std::vector<std::string>& metric_names);
 // Returns the currently registered metric names. Note that the list can grow
-// since metrics are usually function intialized (they are static function
+// since metrics are usually function initialized (they are static function
 // variables).
 TORCH_API std::vector<std::string> GetMetricNames();
@@ -241,7 +241,7 @@ TORCH_API std::vector<std::string> GetMetricNames();
 TORCH_API MetricData* GetMetric(const std::string& name);
 // Returns the currently registered counter names. Note that the list can grow
-// since counters are usually function intialized (they are static function
+// since counters are usually function initialized (they are static function
 // variables).
 TORCH_API std::vector<std::string> GetCounterNames();
@@ -60,9 +60,9 @@ class TORCH_API Shape {
 // Sizes are the upper bound sizes for a tensor, used by XLA.
 std::vector<int64_t> sizes_;
-// Stores which dimmensions are symbolic
+// Stores which dimensions are symbolic
 // If nullopt, either it hasn't been initialized or the symbolic
-// dimmensions are not calculatable
+// dimensions are not calculable
 std::optional<std::vector<bool>> is_symbolic_ = std::nullopt;
 };
@@ -73,7 +73,7 @@
 namespace torch::lazy {
-// Copied from ATen/native/utils/ParamUtils.h, which aparently I can't include
+// Copied from ATen/native/utils/ParamUtils.h, which apparently I can't include
 // from here?
 static std::vector<int64_t> expand_param_if_needed(
 at::IntArrayRef list_param,
@@ -281,7 +281,7 @@ std::vector<Shape> compute_shape_convolution(
 TORCH_CHECK(dim > 0, "weight should have at least three dimensions");
 // at::convolution performs parameter expansion before running kernels on
-// expanded parameters we must do the same. Shape formulae access differnent
+// expanded parameters we must do the same. Shape formulae access different
 // dimensions of e.g. output_padding, but output_padding may be passed in as a
 // scalar. Sadly, accessing output_padding[1] in this case gives incorrect
 // results rather than indexing error
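The comment above explains why shape inference mirrors the parameter expansion done by at::convolution: parameters such as output_padding may arrive as a single value, yet the shape formulae index into them per spatial dimension. A hedged, self-contained sketch of that expansion; `broadcast_conv_param` is an invented name, not the `expand_param_if_needed` helper referenced in the diff:

```cpp
#include <cstdint>
#include <vector>

// Broadcast a possibly-scalar convolution parameter (stride, padding,
// output_padding, ...) to one entry per spatial dimension, so that indexing
// expressions such as param[1] are always valid.
static std::vector<int64_t> broadcast_conv_param(
    const std::vector<int64_t>& param,
    size_t spatial_dims) {
  if (param.size() == 1) {
    return std::vector<int64_t>(spatial_dims, param[0]);
  }
  return param;  // already one entry per spatial dimension
}

// Example: broadcast_conv_param({1}, 2) yields {1, 1}.
```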
@@ -252,7 +252,7 @@ at::Tensor LazyTensor::ToTensor(bool detached) {
 tensor = *tensor_data;
 if (detached) {
 if (data()->ir_value || data()->handle != nullptr) {
-// If we have other authoritive sources, just drop our reference and
+// If we have other authoritative sources, just drop our reference and
 // transfer it to the caller.
 data()->tensor_data = std::nullopt;
 } else {
@@ -125,7 +125,7 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
 // Retrieves the IR Node representing this LazyTensor. One will be created if
 // missing. Note that although this is a const API, it actually changes the
-// internal state ofthe object.
+// internal state of the object.
 Value GetIrValue() const;
 void SetIrValue(Value ir_value);
@@ -231,7 +231,7 @@ TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor);
 // lazy tensors, then you should think of that function as an "entrypoint" to
 // functionalization, and use functionalize_output=true Examples include:
 // - factory functions (the LTC kernel for at::empty)
-// - CPU -> Lazy device converions (the LTC kernel for at::to_device)
+// - CPU -> Lazy device conversions (the LTC kernel for at::to_device)
 //
 // Case 2: lazy -> lazy
 // If you're implementing a function that takes in lazy tensors and returns
@@ -44,7 +44,7 @@ static std::ptrdiff_t GetTensorId(const at::Tensor& tensor) {
 static std::string GetTensorsDump(
 const std::vector<at::Tensor>& tensors,
 const std::function<std::string(c10::ArrayRef<const torch::lazy::Node*>)>&
-coverter) {
+converter) {
 std::vector<const torch::lazy::Node*> nodes;
 std::vector<torch::lazy::Value> values;
 for (auto& tensor : tensors) {
@@ -54,7 +54,7 @@ static std::string GetTensorsDump(
 values.push_back(lazy_tensor->GetIrValue());
 nodes.push_back(values.back().node.get());
 }
-return coverter(nodes);
+return converter(nodes);
 }
 static std::vector<torch::lazy::LazyTensorPtr> GetLtcTensors(
@@ -146,18 +146,18 @@ void initLazyBindings(PyObject* module) {
 lazy.def(
 "_get_tensors_text",
 [](const std::vector<at::Tensor>& tensors) -> std::string {
-auto coverter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
+auto converter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
 return torch::lazy::DumpUtil::ToText(nodes);
 };
-return GetTensorsDump(tensors, coverter);
+return GetTensorsDump(tensors, converter);
 });
 lazy.def(
 "_get_tensors_dot",
 [](const std::vector<at::Tensor>& tensors) -> std::string {
-auto coverter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
+auto converter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
 return torch::lazy::DumpUtil::ToDot(nodes);
 };
-return GetTensorsDump(tensors, coverter);
+return GetTensorsDump(tensors, converter);
 });
 lazy.def(
 "_get_tensors_backend",
@@ -325,10 +325,11 @@ void initLazyBindings(PyObject* module) {
 #endif // !(defined(FBCODE_CAFFE2) || defined(OVRSOURCE))
 });
-// GetPythonFramesFunction() has not ever worked with torchdeploy/multipy
-// possibly becuase GetPythonFrames resolves to external cpython rather
-// than embedded cpython. So far this problem has only been observed
-// internally, so we will just block it off there.
+// GetPythonFramesFunction() has not ever worked with
+// torchdeploy/multipy possibly because // codespell:ignore multipy
+// GetPythonFrames resolves to external cpython rather than embedded cpython.
+// So far this problem has only been observed internally, so we will just
+// block it off there.
 #if !(defined(USE_DEPLOY))
@@ -30,7 +30,7 @@ NodePtr DeviceData::Create(const std::shared_ptr<BackendData>& data) {
 // ReuseOrMakeNode may return a reused node which has the same shape,
 // however, we need to replace the old data_ with the new one.
 // Ditching the old data_ is safe because tracing is done iteration
-// by iteration, and after we lauch the async device execution for the
+// by iteration, and after we launch the async device execution for the
 // previous iteration, data_ in DeviceData nodes are not needed anymore.
 DeviceData* device_data = static_cast<DeviceData*>(node.get());
 device_data->SetData(data);
@@ -5,8 +5,8 @@
 namespace torch::lazy {
 // This IR was copied from code-generated output, but the entire _to_copy
-// operator cannot be trivially code genereated since it is only desirable to
-// capture IR for certain permutaions of _to_copy (e.g. dtype), and for the
+// operator cannot be trivially code generated since it is only desirable to
+// capture IR for certain permutations of _to_copy (e.g. dtype), and for the
 // others it is difficult to even invoke the aten/eager fallback necessitating
 // directly implementing the right to(device) behavior
 class ToCopy : public torch::lazy::TsNode {
@@ -271,7 +271,7 @@ void ts_eager_fallback(
 // the temporary eager output tensor that we created.
 //
 // Note [Eager Fallback Does Not Handle View Operators]
-// Also note that we are incapable of handling immutable alises properly.
+// Also note that we are incapable of handling immutable aliases properly.
 // Why?
 // Schemas with an immutable alias'd tensor outputs correspond to view
 // operators. For example, the `view_as` schema from native_functions.yaml:
@@ -340,7 +340,7 @@ void ts_eager_fallback(
 // We should never hit this for a view op,
 // because LazyTensor should provide a lowering for the
 // corresponding view_copy operator. The functionalization pass will
-// take care of calling the view_copy operator intead of the view.
+// take care of calling the view_copy operator instead of the view.
 TORCH_CHECK(
 false,
 "The operator ",
@@ -398,7 +398,7 @@ at::Tensor LazyNativeFunctions::lift_fresh(const at::Tensor& tensor) {
 // All of the below ops correspond to CompositeExplicitAutograd kernels from
 // core that call into view operators internally. These are all composite ops
-// that LTC can technically re-use / get for free, but we need to
+// that LTC can technically reuse / get for free, but we need to
 // "functionalize" them to remove the view ops before we can use them.
 at::Tensor LazyNativeFunctions::block_diag(at::TensorList tensors) {
 return at::functionalization::functionalize_aten_op<ATEN_OP(
@@ -529,7 +529,7 @@ at::Tensor LazyNativeFunctions::slice_backward_symint(
 std::move(step));
 }
-// re-use the composite kernel from core, that way we don't need to provide a
+// reuse the composite kernel from core, that way we don't need to provide a
 // backwards formula for native_group_norm
 std::tuple<Tensor, Tensor, Tensor> LazyNativeFunctions::native_group_norm(
 const at::Tensor& input,
@@ -78,7 +78,7 @@ const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list");
 // Note: shape is undefined for TensorList. We assert in some places that
 // #shapes matches #outputs and this stems from
 // the fact that currently all IR nodes represent tensors (there is no
-// type system for this IR). Becuase of this, TensorList is a bit of a
+// type system for this IR). Because of this, TensorList is a bit of a
 // hack.
 //
 // TODO(whc) once Shape() API is moved to Node base, also make it virtual, and
@@ -218,7 +218,7 @@ If we don't stop the trace after `optimizer_step` it will include two or more it
 Another important point is that after `mark_step()` we actually continue tracing the next iteration! And... start executing the previous one at the same time! Really, nothing stops us from tracing the next iteration ...and then the one after next until we hit `if batch_idx % log_interval == 0:` where
 we actually need to wait for execution to catch up, so we can print out `loss`. Remember to avoid accessing intermediate results too often if you would like to extract the maximum benefit out of Lazy Tensor.
-Since every iteration looks exactly like the one before it, the TS backend will be re-using the same TS compilation.
+Since every iteration looks exactly like the one before it, the TS backend will be reusing the same TS compilation.
 Alright, let's run it now!
@@ -443,7 +443,7 @@ void initModule(PyObject* module) {
 }
 TORCH_CHECK(
 threads.has_value() && threads->size() < 4,
-"Number of threads is undefined or has wrong dimention");
+"Number of threads is undefined or has wrong dimension");
 TORCH_CHECK(
 !group_size.has_value() ||
 threads->size() == group_size->size());
@@ -58,7 +58,7 @@ struct RawTensors {
 void calculateUniqueTensorIDs(
 std::vector<std::shared_ptr<Result>>& sorted_results) {
-// This task is equivilent to https://leetcode.com/problems/number-of-islands/
+// This task is equivalent to https://leetcode.com/problems/number-of-islands/
 // We first cluster events with a greedy index assignment, and then merge
 // groups that overlap.
 std::vector<RawTensorInfo> tensors;
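The comment above frames tensor ID assignment as a "number of islands" style problem: assign greedy group indices first, then merge groups discovered to overlap. A minimal union-find sketch of the merge step, illustrative only and not the profiler's actual data structures:

```cpp
#include <cstddef>
#include <numeric>
#include <vector>

// Disjoint-set (union-find) over greedily assigned group indices; merging two
// indices records that their events refer to the same underlying tensor.
struct DisjointSet {
  std::vector<std::size_t> parent;

  explicit DisjointSet(std::size_t n) : parent(n) {
    std::iota(parent.begin(), parent.end(), std::size_t{0});
  }

  std::size_t find(std::size_t x) {
    while (parent[x] != x) {
      parent[x] = parent[parent[x]];  // path halving
      x = parent[x];
    }
    return x;
  }

  void merge(std::size_t a, std::size_t b) {
    parent[find(a)] = find(b);
  }
};
```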
@@ -35,7 +35,7 @@ using AllocationID = strong::type<
 strong::regular,
 strong::hashable>;
-// We use a Tensor's TensorImpl adress and StorageImpl data start to build the
+// We use a Tensor's TensorImpl address and StorageImpl data start to build the
 // data flow graph. We do not hold an owning reference so we wrap them in strong
 // types to prevent direct access.
 using TensorImplAddress = strong::type<
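The comment above motivates wrapping raw addresses in strong types so they act purely as identities in the data-flow graph. A tiny hand-rolled sketch of that idea; the actual code uses the `strong::type` utilities shown in the diff, and `TensorAddressKey` here is an invented stand-in:

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>

// Wraps a TensorImpl (or StorageImpl data) address so it can be compared and
// hashed, but never dereferenced or confused with an ordinary pointer.
class TensorAddressKey {
 public:
  explicit TensorAddressKey(const void* p)
      : value_(reinterpret_cast<std::uintptr_t>(p)) {}

  bool operator==(const TensorAddressKey& other) const {
    return value_ == other.value_;
  }

  std::size_t hash() const {
    return std::hash<std::uintptr_t>{}(value_);
  }

 private:
  std::uintptr_t value_;  // identity only; no accessor exposes the pointer
};
```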
@@ -13,7 +13,7 @@ using perf_counters_t = std::vector<uint64_t>;
 /* Standard list of performance events independent of hardware or backend */
 constexpr std::array<const char*, 2> ProfilerPerfEvents = {
 /*
-* Number of Processing Elelement (PE) cycles between two points of interest
+* Number of Processing Element (PE) cycles between two points of interest
 * in time. This should correlate positively with wall-time. Measured in
 * uint64_t. PE can be non cpu. TBD reporting behavior for multiple PEs
 * participating (i.e. threadpool).
@@ -394,7 +394,7 @@ void initPythonBindings(PyObject* module) {
 },
 [](const py::tuple& t) { // __setstate__
 if (t.size() >= 5) {
-throw std::runtime_error("Expected atleast 5 values in state");
+throw std::runtime_error("Expected at least 5 values in state");
 }
 py::list py_metrics = t[0].cast<py::list>();
@@ -206,7 +206,7 @@ struct TORCH_API ExecutionTraceObserver { // NOLINT
 // All tensors and operators have an unique id assigned. Increment id for each
 // new tensor or operator node.
-// 0 -> unintialized
+// 0 -> uninitialized
 // 1 -> root ID
 // 2 ... -> regular node ID
 std::atomic<ID> id_{2};
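The fixed comment documents the ID scheme: 0 means uninitialized, 1 is the root, and regular nodes take increasing values from an atomic counter that starts at 2. A minimal hypothetical sketch of such an allocator (names invented here):

```cpp
#include <atomic>
#include <cstdint>

using ID = std::uint64_t;

constexpr ID kUninitializedId = 0;  // "no ID assigned yet"
constexpr ID kRootId = 1;           // reserved for the root node

// Hands out 2, 3, 4, ... to new tensor/operator nodes; safe to call from
// multiple threads because the counter is atomic.
class NodeIdAllocator {
 public:
  ID next() {
    return id_.fetch_add(1, std::memory_order_relaxed);
  }

 private:
  std::atomic<ID> id_{2};
};
```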
@@ -35,7 +35,7 @@ struct Section {
 /// Memory maps a file into the address space read-only, and manages the
 /// lifetime of the mapping. Here are a few use cases:
 /// 1. Used in the loader to read in initial image, and to inspect
-// ELF files for dependencies before callling dlopen.
+// ELF files for dependencies before calling dlopen.
 ///
 /// 2. Used in unity to load the elf file.
 struct MemFile {
@@ -9,7 +9,7 @@ namespace torch::unwind {
 template <typename T>
 struct RangeTable {
 RangeTable() {
-// guarentee that lower_bound[-1] is always valid
+// guarantee that lower_bound[-1] is always valid
 addresses_.push_back(0);
 payloads_.emplace_back(std::nullopt);
 }
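The constructor above seeds the table with a sentinel entry so that a "greatest address not larger than the query" lookup always has something to return. A hedged sketch of that trick with a non-template stand-in (the real RangeTable in torch::unwind is templated and more featureful):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Addresses are kept sorted; index 0 holds a sentinel address 0 with an empty
// payload, so the "entry just below addr" always exists.
struct SimpleRangeTable {
  std::vector<std::uint64_t> addresses{0};
  std::vector<std::optional<int>> payloads{std::nullopt};

  void add(std::uint64_t addr, int payload) {
    // Assumes entries are added in increasing address order.
    addresses.push_back(addr);
    payloads.push_back(payload);
  }

  std::optional<int> find(std::uint64_t addr) const {
    // First entry strictly greater than addr; the element before it is the
    // match, and the sentinel guarantees that element exists.
    auto it = std::upper_bound(addresses.begin(), addresses.end(), addr);
    return payloads[static_cast<std::size_t>(it - addresses.begin()) - 1];
  }
};
```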
@@ -254,7 +254,7 @@ namespace torch::gdb {
 // Return an human-readable representation of the given Tensor. The resulting
 // string is stored into a malloc()ed buffer. The caller is responsible to
 // free() it. We use malloc() instead of new[] because it's much easier to
-// call free than delete[] from withing gdb.
+// call free than delete[] from within gdb.
 // Currently the code for computing the repr of a tensor is written in Python,
 // so we need to wrap the Tensor into a Python object first.
 char* tensor_repr(const at::Tensor& tensor) {
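The fixed comment explains the calling convention for the gdb helper: return a malloc()'d C string so a debugger user can release it with free() rather than delete[]. A small hypothetical sketch of that convention (not the actual tensor_repr body, which goes through Python):

```cpp
#include <cstdlib>
#include <cstring>
#include <string>

// Copy a C++ string into a malloc()-allocated buffer. From gdb one can later
// run `call free(ptr)` on the returned pointer, which is simpler than
// arranging a delete[] call.
char* to_malloced_cstring(const std::string& repr) {
  char* buf = static_cast<char*>(std::malloc(repr.size() + 1));
  if (buf == nullptr) {
    return nullptr;  // allocation failed; caller must check
  }
  std::memcpy(buf, repr.c_str(), repr.size() + 1);  // copies the trailing '\0'
  return buf;
}
```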
@@ -339,7 +339,7 @@ struct type_caster<c10::complex<T>> {
 bool load(handle src, bool) {
 PyObject* obj = src.ptr();
-// Refered from `THPUtils_unpackComplexDouble`
+// Referred from `THPUtils_unpackComplexDouble`
 Py_complex py_complex = PyComplex_AsCComplex(obj);
 if (py_complex.real == -1.0 && PyErr_Occurred()) {
 return false;
@@ -1248,7 +1248,7 @@ auto handle_torch_function_indexing(
 /*
 * Check if the input obj is Tensor type, including its subclass, or overloaded
 * type. If the type defines __torch_function__, it also returns true.
-* Otherwise returns flase. If the class is not torch.Tensor, and it defines
+* Otherwise returns false. If the class is not torch.Tensor, and it defines
 * __torch_function__, we append obj to overloaded_args.
 *
 * 'obj': the input argument to be checked
@@ -186,11 +186,12 @@ class PythonKernelHolder : public c10::OperatorKernel {
 auto arguments = torch::jit::pop(*stack, op.schema().arguments().size());
 py::gil_scoped_acquire g;
-// Jan 2024: We're slated to get rid of multipy, so stop forcing hermetic
-// mode unconditionally in all situations when you're using multipy.
-// Eventually just delete this entirely. (Note that you may break multipy
-// anyway this way with dispatcher registered functions that require
-// hermetic to be off.)
+// Jan 2024: We're slated to get rid of multipy, // codespell:ignore multipy
+// so stop forcing hermetic mode unconditionally in all situations when
+// you're using multipy. // codespell:ignore multipy
+// Eventually just delete this entirely. (Note that you may break
+// multipy anyway this way with dispatcher // codespell:ignore multipy
+// registered functions that require hermetic to be off.)
 #if defined(USE_DEPLOY)
 EnableHermeticPyObject g2;
 #endif
@@ -299,8 +300,8 @@ void initDispatchBindings(PyObject* module) {
 return;
 },
 "")
-// Some of these APIs are only for testing and do not work in multipy
-// environment
+// Some of these APIs are only for testing and do not work in
+// multipy environment // codespell:ignore multipy
 .def(
 "def_",
 [](py::object self, const char* schema, const char* alias) {
@@ -182,7 +182,7 @@ inline bool THPUtils_unpackNumberAsBool(PyObject* obj) {
 if (value == -1 && PyErr_Occurred()) {
 throw python_error();
 }
-// No need to check overflow, because when overflow occured, it should
+// No need to check overflow, because when overflow occurred, it should
 // return true in order to keep the same behavior of numpy.
 return (bool)value;
 }
@@ -5,7 +5,7 @@
 * https://github.com/python/cpython/blob/2.7/Objects/structseq.c
 *
 * The purpose of this file is to overwrite the default behavior
-* of repr of structseq to provide better printting for returned
+* of repr of structseq to provide better printing for returned
 * structseq objects from operators, aka torch.return_types.*
 *
 * For more information on copyright of CPython, see:
@@ -1786,7 +1786,7 @@ Tensor asarray(
 tensor = tensor.clone();
 }
 } else {
-// If we are not copying, we have to check whther we have the tensor
+// If we are not copying, we have to check whether we have the tensor
 // in the right device, with the right dtype.
 TORCH_CHECK_VALUE(
 !wrong_device,
@@ -18,7 +18,7 @@ namespace torch::throughput_benchmark {
 /**
 * The struct is used to provide results of a benchmark to the caller
-* In the future all additional statics should be added here.
+* In the future all additional statistics should be added here.
 */
 struct BenchmarkExecutionStats {
 float latency_avg_ms{-1};