Remove c10::optional usages in PyTorch (#139525)
Test Plan: Sandcastle

Reviewed By: swolchok

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139525
Approved by: https://github.com/malfet, https://github.com/Skylion007
Commit: 6b8e3022f2
Parent: 419a7e197d
Committed by: PyTorch MergeBot
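The mechanical pattern behind every hunk below: `c10::optional` is an alias for `std::optional`, so this is a spelling migration that replaces the unqualified `optional<T>` / `nullopt` (pulled in from the c10 namespace) with the explicit `std::optional<T>` / `std::nullopt`. A minimal sketch of the before/after shape, using a hypothetical ToySampler rather than any real PyTorch class:

#include <algorithm>
#include <cstddef>
#include <optional>
#include <vector>

// Hypothetical stand-in for the samplers touched below (not the real class).
class ToySampler {
 public:
  explicit ToySampler(size_t size) : size_(size) {}

  // Before this commit the return type was spelled
  // `optional<std::vector<size_t>>` and the empty return was
  // `return nullopt;`, both via the c10 alias.
  std::optional<std::vector<size_t>> next(size_t batch_size) {
    const size_t remaining = size_ - index_;
    if (remaining == 0) {
      return std::nullopt;  // was: return nullopt;
    }
    std::vector<size_t> batch(std::min(batch_size, remaining));
    for (auto& i : batch) {
      i = index_++;
    }
    return batch;
  }

 private:
  size_t size_;
  size_t index_ = 0;
};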
@@ -536,10 +536,10 @@ _scaled_dot_product_flash_attention_batch_rule(
 }
 
 fourOutputs _scaled_dot_product_efficient_attention_batch_rule(
-    const Tensor& query, optional<int64_t> query_bdim,
-    const Tensor& key, optional<int64_t> key_bdim,
-    const Tensor& value, optional<int64_t> value_bdim,
-    const std::optional<Tensor>& attn_bias, optional<int64_t> attn_bias_bdim,
+    const Tensor& query, std::optional<int64_t> query_bdim,
+    const Tensor& key, std::optional<int64_t> key_bdim,
+    const Tensor& value, std::optional<int64_t> value_bdim,
+    const std::optional<Tensor>& attn_bias, std::optional<int64_t> attn_bias_bdim,
     bool compute_log_sumexp,
     double dropout_p,
     bool is_causal,
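For context (an assumption based on functorch's vmap design, not something this diff states): each `std::optional<int64_t>` bdim argument paired with a tensor encodes whether that tensor carries a vmap batch dimension, with `std::nullopt` meaning unbatched. A minimal illustration:

#include <cstdint>
#include <optional>

// Sketch: an engaged bdim names the dimension holding the vmap batch;
// std::nullopt marks a tensor that is not batched at this level.
inline bool is_batched_at(std::optional<int64_t> bdim) {
  return bdim.has_value();
}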
@@ -173,7 +173,7 @@ class DataLoaderBase {
     } else if (auto batch_request = get_batch_request()) {
       return this->main_thread_dataset_->get_batch(std::move(*batch_request));
     }
-    return nullopt;
+    return std::nullopt;
   }
 
   /// The function that worker threads run.
@@ -68,7 +68,7 @@ class StatelessDataLoader : public DataLoaderBase<
     if (!indices ||
        (indices->size() < this->options_.batch_size &&
         this->options_.drop_last)) {
-      return nullopt;
+      return std::nullopt;
    }
    AT_ASSERT(indices->size() > 0);
    return indices;
@@ -74,7 +74,7 @@ class BatchDataBuffer {
     if (batch_queue_.empty()) {
       AT_ASSERT(stop_);
       // All batches have been retrieved. Return an empty batch.
-      return nullopt;
+      return std::nullopt;
     }
 
     UnwrappedBatchData batch = std::move(batch_queue_.front());
@@ -85,7 +85,7 @@ class MapDataset : public BatchDataset<
     if (auto batch = dataset_.get_batch(std::move(indices))) {
       return transform_.apply_batch(std::move(*batch));
     }
-    return nullopt;
+    return std::nullopt;
   }
 
   /// The underlying dataset being transformed.
@@ -49,7 +49,7 @@ class DataShuttle {
       --in_flight_jobs_;
       return result;
     }
-    return nullopt;
+    return std::nullopt;
   }
 
   /// Discards any jobs that are not yet in flight, and waits for all in-flight
@@ -90,7 +90,7 @@ struct OrderedSequencer : public Sequencer<Result> {
       buffer(result->sequence_number) = std::move(result);
     }
     // The result was an empty optional, so we are done with this epoch.
-    return nullopt;
+    return std::nullopt;
   }
 
   /// Accesses the buffer at the `index` modulo the buffer size.
@@ -26,7 +26,7 @@ DistributedRandomSampler::DistributedRandomSampler(
 std::optional<std::vector<size_t>> DistributedRandomSampler::next(
     size_t batch_size) {
   if (sample_index_ == end_index_) {
-    return nullopt;
+    return std::nullopt;
   }
 
   size_t end = sample_index_ + batch_size;
@@ -109,7 +109,7 @@ DistributedSequentialSampler::DistributedSequentialSampler(
 std::optional<std::vector<size_t>> DistributedSequentialSampler::next(
     size_t batch_size) {
   if (sample_index_ == end_index_) {
-    return nullopt;
+    return std::nullopt;
   }
 
   size_t end = sample_index_ + batch_size;
@@ -20,11 +20,11 @@ void RandomSampler::reset(std::optional<size_t> new_size) {
   index_ = 0;
 }
 
-optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
+std::optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
   AT_ASSERT(index_ <= indices_.numel());
   const size_t remaining_indices = indices_.numel() - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   auto slice = indices_.slice(/*dim=*/0, index_, index_ + index_batch.size());
@@ -19,7 +19,7 @@ void SequentialSampler::reset(std::optional<size_t> new_size) {
 std::optional<std::vector<size_t>> SequentialSampler::next(size_t batch_size) {
   const auto remaining_indices = size_ - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   for (auto& i : index_batch) {
@@ -28,7 +28,7 @@ void StreamSampler::reset(std::optional<size_t> new_size) {
 std::optional<BatchSize> StreamSampler::next(size_t batch_size) {
   AT_ASSERT(examples_retrieved_so_far_ <= epoch_size_);
   if (examples_retrieved_so_far_ == epoch_size_) {
-    return nullopt;
+    return std::nullopt;
   }
   if (examples_retrieved_so_far_ + batch_size > epoch_size_) {
     batch_size = epoch_size_ - examples_retrieved_so_far_;
@@ -379,7 +379,7 @@ static std::optional<int64_t> maybe_current_level() {
     int64_t current_level = maybe_layer->layerId();
     return current_level;
   }
-  return nullopt;
+  return std::nullopt;
 }
 
 static void tls_set_vmap_excluded(bool excluded) {
@@ -24,7 +24,7 @@ std::optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
   AT_ASSERT(index_ <= indices_.numel());
   const size_t remaining_indices = indices_.numel() - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
   auto slice = indices_.slice(/*dim=*/0, index_, index_ + index_batch.size());
@@ -15,10 +15,10 @@ void SequentialSampler::reset(std::optional<size_t> new_size) {
   index_ = 0;
 }
 
-optional<std::vector<size_t>> SequentialSampler::next(size_t batch_size) {
+std::optional<std::vector<size_t>> SequentialSampler::next(size_t batch_size) {
   const auto remaining_indices = size_ - index_;
   if (remaining_indices == 0) {
-    return nullopt;
+    return std::nullopt;
  }
  std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
  for (auto& i : index_batch) {
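Callers are unaffected by the respelling: since `c10::optional` aliased `std::optional`, a loop that drains a sampler compiles unchanged. A usage sketch against the hypothetical ToySampler defined in the first example above:

#include <iostream>

int main() {
  ToySampler sampler(10);  // hypothetical class from the sketch above
  // next() yields batches until it returns std::nullopt, which ends the loop.
  while (auto batch = sampler.next(/*batch_size=*/4)) {
    for (size_t idx : *batch) {
      std::cout << idx << ' ';
    }
    std::cout << '\n';
  }
}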