[Environment Variable][Rebase] Use thread-safe getenv functions (#140200)

Use our thread-safe getenv wrappers (c10::utils::get_env, c10::utils::check_env and c10::utils::has_env) instead of calling std::getenv directly.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140200
Approved by: https://github.com/kwen2501, https://github.com/eqy
Author: cyy
Date: 2025-05-02 00:41:49 +00:00
Committed by: PyTorch MergeBot
Parent: a5dd7011a0
Commit: ce94b212c7
23 changed files with 119 additions and 115 deletions
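
For readers unfamiliar with the wrappers, here is a minimal sketch of the before/after pattern applied throughout this commit. It assumes only the c10::utils API visible in the diffs below (get_env returns std::optional<std::string>, check_env returns std::optional<bool>, has_env returns bool); MY_FLAG and MY_MODE are placeholder variable names, not ones PyTorch actually reads.

#include <c10/util/env.h>
#include <string>

bool my_flag_enabled() {
  // Before: const char* v = std::getenv("MY_FLAG");
  //         return v != nullptr && strcmp(v, "1") == 0;
  // After: the wrapper returns a std::optional<std::string> copy, so the
  // result is safe to hold onto and to compare directly against literals.
  const auto v = c10::utils::get_env("MY_FLAG");
  return v == "1";
}

std::string my_mode_or_default() {
  // value_or() replaces the usual "env != nullptr ? env : default" dance.
  return c10::utils::get_env("MY_MODE").value_or("default");
}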

View File

@ -1,9 +1,8 @@
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/PythonOpRegistrationTrampoline.h>
#include <chrono>
#include <list>
#include <sstream>
#include <utility>
#include <c10/util/env.h>
#ifdef FBCODE_CAFFE2
#include <c10/util/static_tracepoint.h>
@@ -17,13 +16,13 @@ TORCH_SDT_DEFINE_SEMAPHORE(operator_end)
 #endif
 bool show_dispatch_trace() {
-  static auto envar = std::getenv("TORCH_SHOW_DISPATCH_TRACE");
+  static auto envar = c10::utils::get_env("TORCH_SHOW_DISPATCH_TRACE");
-  if (envar) {
-    if (strcmp(envar, "0") == 0) {
+  if (envar.has_value()) {
+    if (envar == "0") {
       return false;
     }
-    if (strcmp(envar, "1") == 0) {
+    if (envar == "1") {
       return true;
     }
     TORCH_WARN(

View File

@@ -7,6 +7,7 @@
 #include <ATen/core/grad_mode.h>
 #include <ATen/core/jit_type.h>
 #include <c10/macros/Macros.h>
+#include <c10/util/env.h>
 #include <c10/util/flat_hash_map.h>
 #include <c10/util/irange.h>
 #include <array>
@@ -45,9 +46,9 @@ static_assert(
     "getTypePtr<std::tuple<int64_t, int64_t>> not returning const ref!");
 TypeVerbosity type_verbosity() {
-  static const char* c_verbosity = std::getenv("PYTORCH_JIT_TYPE_VERBOSITY");
+  static const auto c_verbosity = c10::utils::get_env("PYTORCH_JIT_TYPE_VERBOSITY");
   static TypeVerbosity verbosity = c_verbosity ?
-    static_cast<TypeVerbosity>(std::stoi(c_verbosity)) : TypeVerbosity::Default;
+    static_cast<TypeVerbosity>(std::stoi(c_verbosity.value())) : TypeVerbosity::Default;
   return verbosity;
 }

View File

@@ -4,6 +4,7 @@
 #include <c10/core/DeviceType.h>
 #include <c10/util/Array.h>
 #include <c10/util/Exception.h>
+#include <c10/util/env.h>
 #if !defined(__s390x__) && !defined(__powerpc__)
 #include <cpuinfo.h>
@@ -26,20 +27,20 @@ static inline bool cpu_has_vxe()
 #endif
 static CPUCapability compute_cpu_capability() {
-  auto envar = std::getenv("ATEN_CPU_CAPABILITY");
-  if (envar) {
+  const auto envar = c10::utils::get_env("ATEN_CPU_CAPABILITY");
+  if (envar.has_value()) {
 #if defined(HAVE_VSX_CPU_DEFINITION)
-    if (strcmp(envar, "vsx") == 0) {
+    if (envar == "vsx") {
       return CPUCapability::VSX;
     }
 #elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
-    if (strcmp(envar, "zvector") == 0) {
+    if (envar == "zvector") {
       return CPUCapability::ZVECTOR;
     }
 #elif defined(HAVE_SVE_CPU_DEFINITION)
     int sve_vl = cpuinfo_get_max_arm_sve_length(); //Returns maximum SVE VL supported by your HW.
 #ifdef HAVE_SVE256_CPU_DEFINITION
-    if (strcmp(envar, "sve256") == 0) {
+    if (envar == "sve256") {
       if (sve_vl == 256) {
 #ifdef HAVE_ARM_BF16_CPU_DEFINITION
         if (cpuinfo_has_arm_bf16()) {
@@ -53,20 +54,20 @@ static CPUCapability compute_cpu_capability() {
 #endif
 #else
 #ifdef HAVE_AVX512_CPU_DEFINITION
-    if (strcmp(envar, "avx512") == 0) {
+    if (envar == "avx512") {
      return CPUCapability::AVX512;
     }
 #endif
 #ifdef HAVE_AVX2_CPU_DEFINITION
-    if (strcmp(envar, "avx2") == 0) {
+    if (envar == "avx2") {
      return CPUCapability::AVX2;
     }
 #endif
 #endif
-    if (strcmp(envar, "default") == 0) {
+    if (envar == "default") {
      return CPUCapability::DEFAULT;
     }
-    TORCH_WARN("ignoring invalid value for ATEN_CPU_CAPABILITY: ", envar);
+    TORCH_WARN("ignoring invalid value for ATEN_CPU_CAPABILITY: ", envar.value());
   }
 #if !defined(__powerpc__) && !defined(__s390x__) && !defined(HAVE_SVE_CPU_DEFINITION)

View File

@@ -41,16 +41,8 @@ namespace at::native {
 // Parse environment variable "TORCH_LINEAR_FLATTEN_3D"
 static inline bool parseLinearFlatten3d() {
-  // Uninitialized value
-  static int value = -1;
-  if (value == -1) {
-    const char* env_str = std::getenv("TORCH_LINEAR_FLATTEN_3D");
-    if (env_str != nullptr && strcmp(env_str, "1") == 0) {
-      value = 1;
-    } else {
-      value = 0;
-    }
-  }
-  return bool(value);
+  static auto value = c10::utils::check_env("TORCH_LINEAR_FLATTEN_3D");
+  return value.has_value() && value.value();
 }
 // `_flatten_nd_linear` flattens all but the last dimension of the input tensor

View File

@@ -23,6 +23,7 @@
 #include <ATen/cpu/Utils.h>
 #include <c10/core/GradMode.h>
 #include <c10/util/accumulate.h>
+#include <c10/util/env.h>
 #include <c10/util/irange.h>
 #include <variant>
@@ -1366,8 +1367,8 @@ static inline int64_t get_mkldnn_matmul_min_dim() {
       //it's enabled on all Neoverse cpus.
       return is_arm_neoverse() ? 8 : 0;
     }();
-    const char* ptr = std::getenv("TORCH_MKLDNN_MATMUL_MIN_DIM");
-    return ptr != nullptr ? std::atoi(ptr) : default_min_dim;
+    const auto value = c10::utils::get_env("TORCH_MKLDNN_MATMUL_MIN_DIM");
+    return value.has_value() ? std::stoi(value.value()) : default_min_dim;
   }();
   return value;
 }
@@ -1380,8 +1381,8 @@ static inline int64_t get_mkldnn_matmul_min_size() {
       // it's enabled on all Neoverse cpus.
       return is_arm_neoverse() ? 8 * 1024 : 0;
     }();
-    const char* ptr = std::getenv("TORCH_MKLDNN_MATMUL_MIN_SIZE");
-    return ptr != nullptr ? std::atoi(ptr) : default_min_size;
+    const auto value = c10::utils::get_env("TORCH_MKLDNN_MATMUL_MIN_SIZE");
+    return value.has_value() ? std::stoi(value.value()) : default_min_size;
   }();
   return value;
 }

View File

@@ -257,8 +257,8 @@ cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activa
 }
 static bool getDisableAddmmCudaLt() {
-  static const char* env_value = std::getenv("DISABLE_ADDMM_CUDA_LT");
-  if (env_value != nullptr && strcmp(env_value, "1") == 0) {
+  static const auto env_value = c10::utils::get_env("DISABLE_ADDMM_CUDA_LT");
+  if (env_value == "1") {
     return true;
   }
   return false;

View File

@@ -8,6 +8,7 @@
 #include <ATen/MemoryOverlap.h>
 #include <torch/library.h>
+#include <c10/util/env.h>
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Operators.h>
 #include <ATen/NativeFunctions.h>
@@ -95,8 +96,8 @@ inline c10::List<::std::optional<Tensor>> to_meta(const c10::List<::std::optiona
 }
 static bool disable_meta_reference() {
-  static auto env = std::getenv("TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE");
-  return env != nullptr && std::strcmp(env, "1") == 0;
+  static auto env = c10::utils::get_env("TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE");
+  return env == "1";
 }

View File

@@ -42,20 +42,19 @@ size_t CUDAAllocatorConfig::roundup_power2_divisions(size_t size) {
 }
 void CUDAAllocatorConfig::lexArgs(
-    const char* env,
+    const std::string& env,
     std::vector<std::string>& config) {
   std::vector<char> buf;
-  size_t env_length = strlen(env);
-  for (size_t i = 0; i < env_length; i++) {
-    if (env[i] == ',' || env[i] == ':' || env[i] == '[' || env[i] == ']') {
+  for (char ch : env) {
+    if (ch == ',' || ch == ':' || ch == '[' || ch == ']') {
       if (!buf.empty()) {
         config.emplace_back(buf.begin(), buf.end());
         buf.clear();
       }
-      config.emplace_back(1, env[i]);
-    } else if (env[i] != ' ') {
-      buf.emplace_back(static_cast<char>(env[i]));
+      config.emplace_back(1, ch);
+    } else if (ch != ' ') {
+      buf.emplace_back(ch);
     }
   }
   if (!buf.empty()) {
@@ -289,7 +288,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig(
 #endif // USE_ROCM
 }
-void CUDAAllocatorConfig::parseArgs(const char* env) {
+void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) {
   // If empty, set the default values
   m_max_split_size = std::numeric_limits<size_t>::max();
   m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0);
@@ -297,16 +296,16 @@ void CUDAAllocatorConfig::parseArgs(const char* env) {
   bool used_cudaMallocAsync = false;
   bool used_native_specific_option = false;
-  if (env == nullptr) {
+  if (!env.has_value()) {
     return;
   }
   {
     std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex);
-    m_last_allocator_settings = env;
+    m_last_allocator_settings = env.value();
   }
   std::vector<std::string> config;
-  lexArgs(env, config);
+  lexArgs(env.value(), config);
   for (size_t i = 0; i < config.size(); i++) {
     std::string_view config_item_view(config[i]);

View File

@@ -2,6 +2,7 @@
 #include <c10/cuda/CUDAMacros.h>
 #include <c10/util/Exception.h>
+#include <c10/util/env.h>
 #include <atomic>
 #include <cstddef>
@@ -80,11 +81,11 @@ class C10_CUDA_API CUDAAllocatorConfig {
   static CUDAAllocatorConfig& instance() {
     static CUDAAllocatorConfig* s_instance = ([]() {
       auto inst = new CUDAAllocatorConfig();
-      const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF");
+      auto env = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF");
 #ifdef USE_ROCM
       // convenience for ROCm users, allow alternative HIP token
-      if (!env) {
-        env = getenv("PYTORCH_HIP_ALLOC_CONF");
+      if (!env.has_value()) {
+        env = c10::utils::get_env("PYTORCH_HIP_ALLOC_CONF");
       }
 #endif
       inst->parseArgs(env);
@@ -93,12 +94,12 @@ class C10_CUDA_API CUDAAllocatorConfig {
     return *s_instance;
   }
-  void parseArgs(const char* env);
+  void parseArgs(const std::optional<std::string>& env);
  private:
   CUDAAllocatorConfig();
-  static void lexArgs(const char* env, std::vector<std::string>& config);
+  static void lexArgs(const std::string& env, std::vector<std::string>& config);
   static void consumeToken(
       const std::vector<std::string>& config,
       size_t i,

View File

@@ -80,10 +80,10 @@ bool has_env(const char* name) noexcept {
 std::optional<bool> check_env(const char* name) {
   auto env_opt = get_env(name);
   if (env_opt.has_value()) {
-    if (*env_opt == "0") {
+    if (env_opt == "0") {
       return false;
     }
-    if (*env_opt == "1") {
+    if (env_opt == "1") {
       return true;
     }
     TORCH_WARN(

View File

@@ -130,15 +130,15 @@ inline int getCvarInt(const std::vector<std::string>& env, int def) {
    * versions of a variable get higher priority than the latter
    * versions of the same variable */
   for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
-    char* val = std::getenv(env[i].c_str());
-    if (val == nullptr) {
+    const auto val = c10::utils::get_env(env[i].c_str());
+    if (!val.has_value()) {
       continue;
     } else if (i) {
       WARN_ENV_VAR_ONCE(env[i], env[0]);
     }
     try {
-      ret = std::stoi(val);
+      ret = std::stoi(val.value());
     } catch (std::exception&) {
       TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
     }

View File

@@ -3,6 +3,7 @@
 #include <c10/util/Exception.h>
 #include <c10/util/StringUtil.h>
+#include <c10/util/env.h>
 #include <c10/util/irange.h>
 #include <caffe2/serialize/versions.h>
 #include <torch/csrc/jit/api/function_impl.h>
@@ -47,12 +48,11 @@ bool reportSourceLocation(size_t file_size) {
   if (file_size < 512ull * 1024) {
     return true;
   }
-  const char* enable_env =
-      std::getenv("PYTORCH_JIT_ENABLE_LARGE_SOURCE_LOCATION");
+  const auto enable_env =
+      c10::utils::get_env("PYTORCH_JIT_ENABLE_LARGE_SOURCE_LOCATION");
   bool flag = true;
-  if (enable_env == nullptr || std::strcmp(enable_env, "0") == 0 ||
-      std::strcmp(enable_env, "FALSE") == 0 ||
-      std::strcmp(enable_env, "false") == 0) {
+  if (!enable_env.has_value() || enable_env == "0" || enable_env == "FALSE" ||
+      enable_env == "false") {
     flag = false;
   }
   return flag;

View File

@@ -9,6 +9,7 @@
 #include <ATen/core/function.h>
 #include <c10/util/Exception.h>
 #include <c10/util/StringUtil.h>
+#include <c10/util/env.h>
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/frontend/error_report.h>
 #include <torch/csrc/jit/ir/ir.h>
@@ -32,8 +33,10 @@ class JitLoggingConfig {
   std::ostream* out;
   JitLoggingConfig() : out(&std::cerr) {
-    const char* jit_log_level = std::getenv("PYTORCH_JIT_LOG_LEVEL");
-    logging_levels.assign(jit_log_level == nullptr ? "" : jit_log_level);
+    const auto jit_log_level = c10::utils::get_env("PYTORCH_JIT_LOG_LEVEL");
+    if (jit_log_level.has_value()) {
+      logging_levels = jit_log_level.value();
+    }
     parse();
   }

View File

@@ -7,6 +7,7 @@
 #include <ATen/core/function.h>
 #include <c10/util/Exception.h>
 #include <c10/util/StringUtil.h>
+#include <c10/util/env.h>
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/jit_opt_limit.h>
@@ -26,11 +27,9 @@ static int parseOptLimit(const std::string& opt_limit) {
 }
 static std::unordered_map<std::string, int64_t> parseJITOptLimitOption(
-    const char* option) {
+    const std::string& option) {
   std::stringstream in_ss;
-  if (option) {
-    in_ss << option;
-  }
+  in_ss << option;
   std::unordered_map<std::string, int64_t> passes_to_opt_limits;
   std::string line;
   while (std::getline(in_ss, line, ':')) {
@@ -48,14 +47,14 @@ static std::unordered_map<std::string, int64_t> parseJITOptLimitOption(
 }
 bool opt_limit(const char* pass_name) {
-  static const char* opt_limit = std::getenv("PYTORCH_JIT_OPT_LIMIT");
+  static const auto opt_limit = c10::utils::get_env("PYTORCH_JIT_OPT_LIMIT");
   // if nothing is provided, let's allow everything
-  if (!opt_limit) {
+  if (!opt_limit.has_value()) {
     return true;
   }
   static const std::unordered_map<std::string, int64_t> passes_to_opt_limits =
-      parseJITOptLimitOption(opt_limit);
+      parseJITOptLimitOption(opt_limit.value());
   std::string pass = std::filesystem::path(pass_name).stem().string();
   auto opt_limit_it = passes_to_opt_limits.find(pass);

View File

@@ -156,11 +156,11 @@ void setTensorExprFuserEnabled(bool val) {
 }
 bool tensorExprFuserEnabled() {
-  static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR");
-  if (!enable_c_str) {
+  static const auto enable_opt = c10::utils::get_env("PYTORCH_TENSOREXPR");
+  if (!enable_opt.has_value()) {
     return texpr_fuser_enabled_;
   }
-  if (std::string(enable_c_str) == "0") {
+  if (enable_opt == "0") {
     return false;
   }
   return true;
@@ -1294,10 +1294,10 @@ class TensorExprFuser {
   // 'PYTORCH_TENSOREXPR_DONT_FUSE="clamp:mul:add"' disables fusion on
   // aten::clamp, aten::mul and aten::add.
   void parseTENotFuseOption() {
-    const char* option = std::getenv("PYTORCH_TENSOREXPR_DONT_FUSE");
+    const auto option = c10::utils::get_env("PYTORCH_TENSOREXPR_DONT_FUSE");
     std::stringstream in_ss;
-    if (option) {
-      in_ss << option;
+    if (option.has_value()) {
+      in_ss << option.value();
     }
     std::string line;

View File

@@ -863,7 +863,7 @@ bool GraphExecutor::isOptimized() const {
 TORCH_API bool IsNewExecutorEnabled() {
   static const auto disable_new_executor =
-      std::getenv("TORCH_JIT_DISABLE_NEW_EXECUTOR");
+      c10::utils::has_env("TORCH_JIT_DISABLE_NEW_EXECUTOR");
   return getExecutorMode() && FLAGS_torch_jit_enable_new_executor &&
       !disable_new_executor;
 }

View File

@@ -54,47 +54,49 @@ bool setFallbackAllowed(bool value) {
 }
 bool fallbackAllowed() {
-  static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR_FALLBACK");
-  if (!enable_c_str) {
+  static const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_FALLBACK");
+  if (!enable_opt.has_value()) {
     return fallback_allowed;
   }
-  if (std::string(enable_c_str) == "0") {
+  if (enable_opt == "0") {
     return false;
   }
   return true;
 }
 static bool fallbackEnforced() {
-  static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR_FALLBACK");
+  static const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_FALLBACK");
   if (tensorexpr::getTEGenerateBlockCode()) {
     return false;
   }
-  if (!enable_c_str) {
+  if (!enable_opt.has_value()) {
     return fallback_allowed;
   }
-  if (std::string(enable_c_str) == "2") {
+  if (enable_opt == "2") {
     return true;
   }
   return false;
 }
 static int64_t randomTransformsRequested() {
-  const char* enable_c_str =
-      std::getenv("PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED");
-  if (!enable_c_str) {
+  const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED");
+  if (!enable_opt.has_value()) {
     return 0;
   }
-  return std::stoi(std::string(enable_c_str));
+  return std::stoi(enable_opt.value());
 }
 #ifdef TORCH_ENABLE_LLVM
 static bool dontUseLLVMFlag() {
-  static const char* enable_c_str =
-      std::getenv("PYTORCH_TENSOREXPR_DONT_USE_LLVM");
-  if (!enable_c_str) {
+  static const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_DONT_USE_LLVM");
+  if (!enable_opt) {
     return false;
   }
-  return std::string(enable_c_str) == "1";
+  return enable_opt == "1";
 }
 #endif

View File

@@ -1,3 +1,4 @@
+#include <c10/util/env.h>
 #include <torch/csrc/lazy/core/config.h>
 C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging")
@@ -76,9 +77,9 @@ namespace torch::lazy {
 std::string& getLTCForceFallback() {
   static std::string config;
   static bool _ignore = [&]() {
-    char* envptr = std::getenv("LTC_FORCE_FALLBACK");
-    if (envptr) {
-      config = std::string(envptr);
+    auto env = c10::utils::get_env("LTC_FORCE_FALLBACK");
+    if (env.has_value()) {
+      config = std::string(env.value());
     }
     return true;
   }();

View File

@@ -1,3 +1,4 @@
+#include <c10/util/env.h>
 #include <c10/util/irange.h>
 #include <torch/csrc/lazy/core/debug_util.h>
@@ -17,8 +18,8 @@ namespace torch::lazy {
 namespace {
 std::string GetEnvString(const char* name, const std::string& defval) {
-  const char* env = std::getenv(name);
-  return env != nullptr ? env : defval;
+  const auto env = c10::utils::get_env(name);
+  return env.value_or(defval);
 }
 DebugUtil::GraphFormat DefaultGraphFormat() {

View File

@@ -1,3 +1,4 @@
+#include <c10/util/env.h>
 #include <c10/util/irange.h>
 #include <torch/csrc/lazy/core/shape.h>
 #include <torch/csrc/lazy/core/tensor.h>
@@ -57,7 +58,7 @@ Shape Shape::with_symbolic_dims(
 }
 bool symbolicShapeEnabled() {
-  static bool enabled = std::getenv("LTC_ENABLE_SYMBOLIC_SHAPES") != nullptr;
+  static bool enabled = c10::utils::has_env("LTC_ENABLE_SYMBOLIC_SHAPES");
   return enabled || FLAGS_ltc_enable_symbolic_shapes;
 }

View File

@@ -7,6 +7,7 @@
 #endif
 #include <c10/util/Exception.h>
+#include <c10/util/env.h>
 namespace torch {
@@ -221,11 +222,9 @@ bool collectivesProfilerExists() {
 #if defined(KINETO_HAS_HCCL_PROFILER)
   return true;
 #endif
-  const char* val = std::getenv("TORCH_PROFILER_ENABLE_COLLECTIVE_PROFILING");
-  if (val == nullptr) {
-    return false;
-  }
-  return std::strcmp(val, "1") == 0;
+  const auto val =
+      c10::utils::get_env("TORCH_PROFILER_ENABLE_COLLECTIVE_PROFILING");
+  return val == "1";
 }
 #ifdef USE_KINETO

View File

@@ -25,6 +25,7 @@
 #include <ATen/core/function_schema.h>
 #include <ATen/core/stack.h>
 #include <ATen/record_function.h>
+#include <c10/util/env.h>
 #include <c10/util/irange.h>
 #include <torch/csrc/profiler/standalone/execution_trace_observer.h>
 #include <torch/csrc/profiler/util.h>
@@ -898,18 +899,18 @@ bool addExecutionTraceObserver(const std::string& output_file_path) {
   // check if the environment variable is set to force recording integer
   // tensors
-  auto env_variable =
-      getenv("ENABLE_PYTORCH_EXECUTION_TRACE_SAVE_INTEGRAL_TENSOR_RANGE");
-  if (env_variable != nullptr) {
+  auto env_variable = c10::utils::get_env(
+      "ENABLE_PYTORCH_EXECUTION_TRACE_SAVE_INTEGRAL_TENSOR_RANGE");
+  if (env_variable.has_value()) {
     ob.record_integral_tensor_range = true;
   }
   // check if the environment variable is set to force recording integer
   // tensors
-  env_variable =
-      getenv("ENABLE_PYTORCH_EXECUTION_TRACE_SAVE_INTEGRAL_TENSOR_DATA");
-  if (env_variable != nullptr) {
-    std::istringstream stream(env_variable);
+  env_variable = c10::utils::get_env(
+      "ENABLE_PYTORCH_EXECUTION_TRACE_SAVE_INTEGRAL_TENSOR_DATA");
+  if (env_variable.has_value()) {
+    std::istringstream stream(env_variable.value());
     std::string token;
     while (std::getline(stream, token, ',')) {
       ob.nodeListForSavingIntegerTensor.insert(token);

View File

@ -1,5 +1,6 @@
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/env.h>
#include <torch/csrc/profiler/unwind/unwind.h>
#include <torch/csrc/utils/cpp_stacktraces.h>
@ -321,10 +322,10 @@ static std::string dladdr_lookup(void* addr) {
struct Symbolizer {
Symbolizer() {
auto envar = std::getenv("TORCH_ADDR2LINE_BINARY");
if (envar != nullptr) {
auto envar = c10::utils::get_env("TORCH_ADDR2LINE_BINARY");
if (envar.has_value()) {
// currently we take user's input as is without checking
addr2line_binary_ = envar;
addr2line_binary_ = std::move(envar.value());
TORCH_WARN("Use custom addr2line binary: ", addr2line_binary_);
} else {
addr2line_binary_ = "addr2line"; // default
@ -379,7 +380,7 @@ struct Symbolizer {
private:
static constexpr int BLOCK = 1024;
const char* addr2line_binary_;
std::string addr2line_binary_;
struct Entry {
std::unique_ptr<Communicate> comm;
std::vector<void*> queried;
@ -394,12 +395,13 @@ struct Symbolizer {
if (it == entries_.end()) {
// NOLINTNEXTLINE(*-c-arrays*)
const char* args[] = {
addr2line_binary_, "-C", "-f", "-e", name.c_str(), nullptr};
addr2line_binary_.c_str(), "-C", "-f", "-e", name.c_str(), nullptr};
it = entries_
.insert_or_assign(
name,
Entry{
std::make_unique<Communicate>(addr2line_binary_, args),
std::make_unique<Communicate>(
addr2line_binary_.c_str(), args),
{}})
.first;
}