Remove c10::optional usages in PyTorch (#139525)

Test Plan: Sandcastle

Reviewed By: swolchok

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139525
Approved by: https://github.com/malfet, https://github.com/Skylion007
commit 6b8e3022f2
parent 419a7e197d
Author:    Richard Barnes
Date:      2024-11-04 15:35:23 +00:00
Committed: PyTorch MergeBot

14 changed files with 20 additions and 20 deletions
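
The diff below is mechanical: by the time of this commit, c10::optional and c10::nullopt were already aliases for std::optional and std::nullopt, so every remaining spelling of the old names (including bare optional/nullopt brought in through using-declarations) is rewritten to the standard-library form. A minimal sketch of the before/after pattern, using an illustrative function that is not taken from the diff:

    #include <cstdint>
    #include <optional>

    // Before: the return type was spelled optional<int64_t> and the empty
    // state was a bare `nullopt` (names imported from c10).
    // After: the standard-library spellings, written out explicitly.
    std::optional<int64_t> maybe_value(bool has_value, int64_t value) {
      if (!has_value) {
        return std::nullopt;  // was: return nullopt;
      }
      return value;
    }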

@@ -536,10 +536,10 @@ _scaled_dot_product_flash_attention_batch_rule(
 }

 fourOutputs _scaled_dot_product_efficient_attention_batch_rule(
-    const Tensor& query, optional<int64_t> query_bdim,
-    const Tensor& key, optional<int64_t> key_bdim,
-    const Tensor& value, optional<int64_t> value_bdim,
-    const std::optional<Tensor>& attn_bias, optional<int64_t> attn_bias_bdim,
+    const Tensor& query, std::optional<int64_t> query_bdim,
+    const Tensor& key, std::optional<int64_t> key_bdim,
+    const Tensor& value, std::optional<int64_t> value_bdim,
+    const std::optional<Tensor>& attn_bias, std::optional<int64_t> attn_bias_bdim,
     bool compute_log_sumexp,
     double dropout_p,
     bool is_causal,
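
For readers unfamiliar with the convention in the signature above: functorch batch rules pair each Tensor argument with a std::optional<int64_t> batch dimension, and std::nullopt signals that the tensor is not batched at the current vmap level. A hedged illustration of that convention (the helper below is hypothetical, not part of the diff):

    #include <cstdint>
    #include <optional>

    // Assumption for illustration: when a batch dim is present it is moved
    // to the front, so each logical dim shifts back by one; an absent bdim
    // leaves dims unchanged.
    inline int64_t physical_dim(std::optional<int64_t> bdim, int64_t logical_dim) {
      return bdim.has_value() ? logical_dim + 1 : logical_dim;
    }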

@@ -173,7 +173,7 @@ class DataLoaderBase {
     } else if (auto batch_request = get_batch_request()) {
       return this->main_thread_dataset_->get_batch(std::move(*batch_request));
     }
-    return nullopt;
+    return std::nullopt;
   }

   /// The function that worker threads run.

@@ -68,7 +68,7 @@ class StatelessDataLoader : public DataLoaderBase<
     if (!indices ||
         (indices->size() < this->options_.batch_size &&
          this->options_.drop_last)) {
-      return nullopt;
+      return std::nullopt;
     }
     AT_ASSERT(indices->size() > 0);
     return indices;

@@ -74,7 +74,7 @@ class BatchDataBuffer {
     if (batch_queue_.empty()) {
       AT_ASSERT(stop_);
       // All batches have been retrieved. Return an empty batch.
-      return nullopt;
+      return std::nullopt;
     }
     UnwrappedBatchData batch = std::move(batch_queue_.front());

@@ -85,7 +85,7 @@ class MapDataset : public BatchDataset<
     if (auto batch = dataset_.get_batch(std::move(indices))) {
       return transform_.apply_batch(std::move(*batch));
     }
-    return nullopt;
+    return std::nullopt;
   }

   /// The underlying dataset being transformed.

@@ -49,7 +49,7 @@ class DataShuttle {
       --in_flight_jobs_;
       return result;
     }
-    return nullopt;
+    return std::nullopt;
   }

   /// Discards any jobs that are not yet in flight, and waits for all in-flight

@@ -90,7 +90,7 @@ struct OrderedSequencer : public Sequencer<Result> {
       buffer(result->sequence_number) = std::move(result);
     }
     // The result was an empty optional, so we are done with this epoch.
-    return nullopt;
+    return std::nullopt;
   }

   /// Accesses the buffer at the `index` modulo the buffer size.

@@ -26,7 +26,7 @@ DistributedRandomSampler::DistributedRandomSampler(
 std::optional<std::vector<size_t>> DistributedRandomSampler::next(
     size_t batch_size) {
   if (sample_index_ == end_index_) {
-    return nullopt;
+    return std::nullopt;
   }

   size_t end = sample_index_ + batch_size;
@@ -109,7 +109,7 @@ DistributedSequentialSampler::DistributedSequentialSampler(
 std::optional<std::vector<size_t>> DistributedSequentialSampler::next(
     size_t batch_size) {
   if (sample_index_ == end_index_) {
-    return nullopt;
+    return std::nullopt;
   }

   size_t end = sample_index_ + batch_size;

@@ -20,11 +20,11 @@ void RandomSampler::reset(std::optional<size_t> new_size) {
   index_ = 0;
 }

-optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
+std::optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
   AT_ASSERT(index_ <= indices_.numel());
   const size_t remaining_indices = indices_.numel() - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   auto slice = indices_.slice(/*dim=*/0, index_, index_ + index_batch.size());

@@ -19,7 +19,7 @@ void SequentialSampler::reset(std::optional<size_t> new_size) {
 std::optional<std::vector<size_t>> SequentialSampler::next(size_t batch_size) {
   const auto remaining_indices = size_ - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   for (auto& i : index_batch) {

@@ -28,7 +28,7 @@ void StreamSampler::reset(std::optional<size_t> new_size) {
 std::optional<BatchSize> StreamSampler::next(size_t batch_size) {
   AT_ASSERT(examples_retrieved_so_far_ <= epoch_size_);
   if (examples_retrieved_so_far_ == epoch_size_) {
-    return nullopt;
+    return std::nullopt;
   }
   if (examples_retrieved_so_far_ + batch_size > epoch_size_) {
     batch_size = epoch_size_ - examples_retrieved_so_far_;

@@ -379,7 +379,7 @@ static std::optional<int64_t> maybe_current_level() {
     int64_t current_level = maybe_layer->layerId();
     return current_level;
   }
-  return nullopt;
+  return std::nullopt;
 }

 static void tls_set_vmap_excluded(bool excluded) {

@@ -24,7 +24,7 @@ std::optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
   AT_ASSERT(index_ <= indices_.numel());
   const size_t remaining_indices = indices_.numel() - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   auto slice = indices_.slice(/*dim=*/0, index_, index_ + index_batch.size());

@@ -15,10 +15,10 @@ void SequentialSampler::reset(std::optional<size_t> new_size) {
   index_ = 0;
 }

-optional<std::vector<size_t>> SequentialSampler::next(size_t batch_size) {
+std::optional<std::vector<size_t>> SequentialSampler::next(size_t batch_size) {
   const auto remaining_indices = size_ - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   for (auto& i : index_batch) {
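
For context on the sampler hunks above: next(batch_size) hands out index batches until the epoch is exhausted, then signals completion with an empty optional. A minimal sketch of how a caller typically drains such a sampler (the consumer function is illustrative and assumes only the next() signature shown above):

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Drains any sampler whose next(batch_size) returns
    // std::optional<std::vector<std::size_t>>, stopping at std::nullopt.
    template <typename Sampler>
    std::size_t count_samples(Sampler& sampler, std::size_t batch_size) {
      std::size_t total = 0;
      while (std::optional<std::vector<std::size_t>> batch = sampler.next(batch_size)) {
        total += batch->size();  // an engaged optional means another batch
      }
      return total;
    }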