Renaming IValue List functions (#32093)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/32093

    toGenericListRef -> toListRef
    isGenericList    -> isList
    toGenericList    -> toList
    toXListRef       -> toXVector

Test Plan: Imported from OSS
Reviewed By: suo
Differential Revision: D19369767
Pulled By: zdevito
fbshipit-source-id: 4f0078f95b83e6586524c03f7bcf206722fdd9ae

Committed by: Facebook Github Bot
Parent: bdd5e15437
Commit: 7e3c438913
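For orientation, a minimal sketch of how calling code changes under this rename. It is illustrative only and not part of the commit; it assumes the usual ATen include and a caller that already holds an IValue, and `inspect` is a hypothetical function name.

    #include <ATen/core/ivalue.h>
    #include <vector>

    // Sketch: new spellings in code, old spellings noted in comments.
    void inspect(const c10::IValue& value) {
      if (value.isList()) {                                   // was: value.isGenericList()
        c10::List<c10::IValue> owned = value.toList();        // was: value.toGenericList()
        c10::ArrayRef<c10::IValue> view = value.toListRef();  // was: value.toGenericListRef()
        (void)owned; (void)view;
      }
      if (value.isIntList()) {
        std::vector<int64_t> ints = value.toIntVector();      // was: value.toIntListRef()
        (void)ints;
      }
    }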
@@ -340,8 +340,8 @@ facebook::jni::local_ref<JIValue> JIValue::newJIValueFromAtIValue(
 (*jArray)[index++] = TensorHybrid::newJTensorFromAtTensor(e);
 }
 return jMethodTensorListArr(JIValue::javaClassStatic(), jArray);
-} else if (ivalue.isGenericList()) {
-auto list = ivalue.toGenericList();
+} else if (ivalue.isList()) {
+auto list = ivalue.toList();
 static auto jMethodListArr =
 JIValue::javaClassStatic()
 ->getStaticMethod<facebook::jni::local_ref<JIValue>(
@@ -19,12 +19,12 @@ inline bool shallowEquals(const IValue& lhs, const IValue& rhs) {
 return rhs.isDouble() && lhs.toDouble() == rhs.toDouble();
 } else if (lhs.isBool()) {
 return rhs.isBool() && lhs.toBool() == rhs.toBool();
-} else if (lhs.isGenericList()) {
-if (!rhs.isGenericList()) {
+} else if (lhs.isList()) {
+if (!rhs.isList()) {
 return false;
 }
-auto l = lhs.toGenericListRef();
-auto r = rhs.toGenericListRef();
+auto l = lhs.toListRef();
+auto r = rhs.toListRef();
 if (l.size() != r.size()) {
 return false;
 }
@@ -174,7 +174,7 @@ private:
 };

 template<class T> List<T> toTypedList(List<IValue> list);
-template<class T> List<IValue> toGenericList(List<T> list);
+template<class T> List<IValue> toList(List<T> list);
 const IValue* ptr_to_first_element(const List<IValue>& list);
 }
 template<class T> bool list_is_equal(const List<T>& lhs, const List<T>& rhs);

@@ -417,7 +417,7 @@ private:
 explicit List(c10::intrusive_ptr<detail::ListImpl>&& elements);
 friend struct IValue;
 template<class T_> friend List<T_> impl::toTypedList(List<IValue>);
-template<class T_> friend List<IValue> impl::toGenericList(List<T_>);
+template<class T_> friend List<IValue> impl::toList(List<T_>);
 friend const IValue* impl::ptr_to_first_element(const List<IValue>& list);
 };

@@ -54,7 +54,7 @@ List<T> toTypedList(impl::GenericList list) {
 }

 template<class T>
-impl::GenericList toGenericList(List<T> list) {
+impl::GenericList toList(List<T> list) {
 return GenericList(std::move(list.impl_));
 }
 }
@ -180,10 +180,10 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorLi
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), dummyTensor(DispatchKey::CUDATensorId), dummyTensor(DispatchKey::CPUTensorId));
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorListRef()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[2]));
|
||||
EXPECT_EQ(3, result[0].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorVector()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[2]));
|
||||
}
|
||||
|
||||
std::vector<int64_t> kernelWithIntListOutput(const Tensor&, int64_t input1, int64_t input2, int64_t input3) {
|
||||
@ -199,10 +199,10 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListO
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), 2, 4, 6);
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toIntListRef().size());
|
||||
EXPECT_EQ(2, result[0].toIntListRef()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntListRef()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntListRef()[2]);
|
||||
EXPECT_EQ(3, result[0].toIntVector().size());
|
||||
EXPECT_EQ(2, result[0].toIntVector()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntVector()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntVector()[2]);
|
||||
}
|
||||
|
||||
std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
|
||||
@ -229,9 +229,9 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMultiple
|
||||
EXPECT_EQ(5, result.size());
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensor()));
|
||||
EXPECT_EQ(5, result[1].toInt());
|
||||
EXPECT_EQ(2, result[2].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorListRef()[1]));
|
||||
EXPECT_EQ(2, result[2].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorVector()[1]));
|
||||
EXPECT_EQ(0, result[3].toInt());
|
||||
auto result_dict = c10::impl::toTypedDict<string, Tensor>(result[4].toGenericDict());
|
||||
EXPECT_EQ(2, result_dict.size());
|
||||
@ -423,13 +423,13 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithTensorLi
|
||||
EXPECT_EQ(2, outputs[0].toInt());
|
||||
}
|
||||
|
||||
void kernelWithLegacyTensorListRefInputWithoutOutput(const std::vector<Tensor>& input1) {
|
||||
void kernelWithLegacyTensorVectorInputWithoutOutput(const std::vector<Tensor>& input1) {
|
||||
captured_input_list_size = input1.size();
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorListRefInput_withoutOutput_whenRegistered_thenCanBeCalled) {
|
||||
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorVectorInput_withoutOutput_whenRegistered_thenCanBeCalled) {
|
||||
auto registrar = RegisterOperators()
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> ()", &kernelWithLegacyTensorListRefInputWithoutOutput);
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> ()", &kernelWithLegacyTensorVectorInputWithoutOutput);
|
||||
|
||||
auto op = c10::Dispatcher::singleton().findSchema({"_test::tensor_list_input", ""});
|
||||
ASSERT_TRUE(op.has_value());
|
||||
@ -440,13 +440,13 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTe
|
||||
EXPECT_EQ(2, captured_input_list_size);
|
||||
}
|
||||
|
||||
int64_t kernelWithLegacyTensorListRefInputWithOutput(const std::vector<Tensor>& input1) {
|
||||
int64_t kernelWithLegacyTensorVectorInputWithOutput(const std::vector<Tensor>& input1) {
|
||||
return input1.size();
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorListRefInput_withOutput_whenRegistered_thenCanBeCalled) {
|
||||
TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithLegacyTensorVectorInput_withOutput_whenRegistered_thenCanBeCalled) {
|
||||
auto registrar = RegisterOperators()
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> int", &kernelWithLegacyTensorListRefInputWithOutput);
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> int", &kernelWithLegacyTensorVectorInputWithOutput);
|
||||
|
||||
auto op = c10::Dispatcher::singleton().findSchema({"_test::tensor_list_input", ""});
|
||||
ASSERT_TRUE(op.has_value());
|
||||
@ -503,7 +503,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithStringLi
|
||||
c10::List<std::string> list({"value1", "value2"});
|
||||
auto outputs = callOp(*op, list);
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
auto output = std::move(outputs[0]).toGenericList();
|
||||
auto output = std::move(outputs[0]).toList();
|
||||
|
||||
EXPECT_EQ(2, output.size());
|
||||
EXPECT_EQ("value1", output.get(0).toString()->string());
|
||||
@ -717,7 +717,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMa
|
||||
c10::List<c10::Dict<string, int64_t>> list({dict1, dict2});
|
||||
auto outputs = callOp(*op, list);
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toGenericList();
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toList();
|
||||
|
||||
EXPECT_EQ(2, output.size());
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().size());
|
||||
@ -748,22 +748,22 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMa
|
||||
c10::List<c10::Dict<string, c10::List<int64_t>>> list({ dict1, dict2 });
|
||||
auto outputs = callOp(*op, list);
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toGenericList();
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toList();
|
||||
|
||||
EXPECT_EQ(2, output.size());
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().size());
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntListRef().size());
|
||||
EXPECT_EQ(1, output.get(0).toGenericDict().at("1").toIntListRef()[0]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("3").toIntListRef().size());
|
||||
EXPECT_EQ(3, output.get(0).toGenericDict().at("3").toIntListRef()[0]);
|
||||
EXPECT_EQ(4, output.get(0).toGenericDict().at("3").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("5").toIntListRef().size());
|
||||
EXPECT_EQ(5, output.get(1).toGenericDict().at("5").toIntListRef()[0]);
|
||||
EXPECT_EQ(6, output.get(1).toGenericDict().at("5").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("7").toIntListRef().size());
|
||||
EXPECT_EQ(7, output.get(1).toGenericDict().at("7").toIntListRef()[0]);
|
||||
EXPECT_EQ(8, output.get(1).toGenericDict().at("7").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntVector().size());
|
||||
EXPECT_EQ(1, output.get(0).toGenericDict().at("1").toIntVector()[0]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntVector()[1]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("3").toIntVector().size());
|
||||
EXPECT_EQ(3, output.get(0).toGenericDict().at("3").toIntVector()[0]);
|
||||
EXPECT_EQ(4, output.get(0).toGenericDict().at("3").toIntVector()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("5").toIntVector().size());
|
||||
EXPECT_EQ(5, output.get(1).toGenericDict().at("5").toIntVector()[0]);
|
||||
EXPECT_EQ(6, output.get(1).toGenericDict().at("5").toIntVector()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("7").toIntVector().size());
|
||||
EXPECT_EQ(7, output.get(1).toGenericDict().at("7").toIntVector()[0]);
|
||||
EXPECT_EQ(8, output.get(1).toGenericDict().at("7").toIntVector()[1]);
|
||||
}
|
||||
|
||||
bool called = false;
|
||||
|
@ -177,10 +177,10 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithTensorListOutp
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), dummyTensor(DispatchKey::CUDATensorId), dummyTensor(DispatchKey::CPUTensorId));
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorListRef()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[2]));
|
||||
EXPECT_EQ(3, result[0].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorVector()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[2]));
|
||||
}
|
||||
|
||||
c10::List<int64_t> kernelWithIntListOutput(const Tensor&, int64_t input1, int64_t input2, int64_t input3) {
|
||||
@ -196,10 +196,10 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListOutput_
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), 2, 4, 6);
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toIntListRef().size());
|
||||
EXPECT_EQ(2, result[0].toIntListRef()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntListRef()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntListRef()[2]);
|
||||
EXPECT_EQ(3, result[0].toIntVector().size());
|
||||
EXPECT_EQ(2, result[0].toIntVector()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntVector()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntVector()[2]);
|
||||
}
|
||||
|
||||
std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
|
||||
@ -226,9 +226,9 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithMultipleOutput
|
||||
EXPECT_EQ(5, result.size());
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensor()));
|
||||
EXPECT_EQ(5, result[1].toInt());
|
||||
EXPECT_EQ(2, result[2].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorListRef()[1]));
|
||||
EXPECT_EQ(2, result[2].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorVector()[1]));
|
||||
EXPECT_EQ(0, result[3].toInt());
|
||||
auto result_dict = c10::impl::toTypedDict<string, Tensor>(result[4].toGenericDict());
|
||||
EXPECT_EQ(2, result_dict.size());
|
||||
|
@ -194,10 +194,10 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTensorListOutpu
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), dummyTensor(DispatchKey::CUDATensorId), dummyTensor(DispatchKey::CPUTensorId));
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorListRef()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[2]));
|
||||
EXPECT_EQ(3, result[0].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorVector()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[2]));
|
||||
}
|
||||
|
||||
struct KernelWithIntListOutput final : OperatorKernel {
|
||||
@ -215,10 +215,10 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListOutput_w
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), 2, 4, 6);
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toIntListRef().size());
|
||||
EXPECT_EQ(2, result[0].toIntListRef()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntListRef()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntListRef()[2]);
|
||||
EXPECT_EQ(3, result[0].toIntVector().size());
|
||||
EXPECT_EQ(2, result[0].toIntVector()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntVector()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntVector()[2]);
|
||||
}
|
||||
|
||||
struct KernelWithMultipleOutputs final : OperatorKernel {
|
||||
@ -247,9 +247,9 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithMultipleOutputs
|
||||
EXPECT_EQ(5, result.size());
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensor()));
|
||||
EXPECT_EQ(5, result[1].toInt());
|
||||
EXPECT_EQ(2, result[2].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorListRef()[1]));
|
||||
EXPECT_EQ(2, result[2].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorVector()[1]));
|
||||
EXPECT_EQ(0, result[3].toInt());
|
||||
auto result_dict = c10::impl::toTypedDict<string, Tensor>(result[4].toGenericDict());
|
||||
EXPECT_EQ(2, result_dict.size());
|
||||
|
@ -161,10 +161,10 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorList
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), dummyTensor(DispatchKey::CUDATensorId), dummyTensor(DispatchKey::CPUTensorId));
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorListRef()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[2]));
|
||||
EXPECT_EQ(3, result[0].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorVector()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[2]));
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
|
||||
@ -178,10 +178,10 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOut
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), 2, 4, 6);
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toIntListRef().size());
|
||||
EXPECT_EQ(2, result[0].toIntListRef()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntListRef()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntListRef()[2]);
|
||||
EXPECT_EQ(3, result[0].toIntVector().size());
|
||||
EXPECT_EQ(2, result[0].toIntVector()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntVector()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntVector()[2]);
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
|
||||
@ -206,9 +206,9 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOu
|
||||
EXPECT_EQ(5, result.size());
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensor()));
|
||||
EXPECT_EQ(5, result[1].toInt());
|
||||
EXPECT_EQ(2, result[2].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorListRef()[1]));
|
||||
EXPECT_EQ(2, result[2].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorVector()[1]));
|
||||
EXPECT_EQ(0, result[3].toInt());
|
||||
auto result_dict = c10::impl::toTypedDict<string, Tensor>(result[4].toGenericDict());
|
||||
EXPECT_EQ(2, result_dict.size());
|
||||
@ -367,7 +367,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorList
|
||||
EXPECT_EQ(2, captured_input_list_size);
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorListRefInput_withOutput_whenRegistered_thenCanBeCalled) {
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorVectorInput_withOutput_whenRegistered_thenCanBeCalled) {
|
||||
auto registrar = RegisterOperators()
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> int", [] (const std::vector<Tensor>& input1) -> int64_t {
|
||||
return input1.size();
|
||||
@ -381,7 +381,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithTensorList
|
||||
EXPECT_EQ(2, outputs[0].toInt());
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorListRefInput_withoutOutput_whenRegistered_thenCanBeCalled) {
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorVectorInput_withoutOutput_whenRegistered_thenCanBeCalled) {
|
||||
auto registrar = RegisterOperators()
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> ()", [] (const std::vector<Tensor>& input1) -> void {
|
||||
captured_input_list_size = input1.size();
|
||||
@ -396,7 +396,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTens
|
||||
EXPECT_EQ(2, captured_input_list_size);
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorListRefInput_withOutput_whenRegistered_thenCanBeCalled) {
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithLegacyTensorVectorInput_withOutput_whenRegistered_thenCanBeCalled) {
|
||||
auto registrar = RegisterOperators()
|
||||
.op("_test::tensor_list_input(Tensor[] input) -> int", [] (const std::vector<Tensor>& input1) -> int64_t {
|
||||
return input1.size();
|
||||
@ -451,7 +451,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithStringList
|
||||
c10::List<std::string> list({"value1", "value2"});
|
||||
auto outputs = callOp(*op, list);
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
auto output = std::move(outputs[0]).toGenericList();
|
||||
auto output = std::move(outputs[0]).toList();
|
||||
|
||||
EXPECT_EQ(2, output.size());
|
||||
EXPECT_EQ("value1", output.get(0).toString()->string());
|
||||
@ -650,7 +650,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMap_
|
||||
c10::List<c10::Dict<string, int64_t>> list({dict1, dict2});
|
||||
auto outputs = callOp(*op, list);
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toGenericList();
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toList();
|
||||
|
||||
EXPECT_EQ(2, output.size());
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().size());
|
||||
@ -679,22 +679,22 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMapO
|
||||
c10::List<c10::Dict<string, c10::List<int64_t>>> list({ dict1, dict2 });
|
||||
auto outputs = callOp(*op, list);
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toGenericList();
|
||||
c10::impl::GenericList output = std::move(outputs[0]).toList();
|
||||
|
||||
EXPECT_EQ(2, output.size());
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().size());
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntListRef().size());
|
||||
EXPECT_EQ(1, output.get(0).toGenericDict().at("1").toIntListRef()[0]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("3").toIntListRef().size());
|
||||
EXPECT_EQ(3, output.get(0).toGenericDict().at("3").toIntListRef()[0]);
|
||||
EXPECT_EQ(4, output.get(0).toGenericDict().at("3").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("5").toIntListRef().size());
|
||||
EXPECT_EQ(5, output.get(1).toGenericDict().at("5").toIntListRef()[0]);
|
||||
EXPECT_EQ(6, output.get(1).toGenericDict().at("5").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("7").toIntListRef().size());
|
||||
EXPECT_EQ(7, output.get(1).toGenericDict().at("7").toIntListRef()[0]);
|
||||
EXPECT_EQ(8, output.get(1).toGenericDict().at("7").toIntListRef()[1]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntVector().size());
|
||||
EXPECT_EQ(1, output.get(0).toGenericDict().at("1").toIntVector()[0]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("1").toIntVector()[1]);
|
||||
EXPECT_EQ(2, output.get(0).toGenericDict().at("3").toIntVector().size());
|
||||
EXPECT_EQ(3, output.get(0).toGenericDict().at("3").toIntVector()[0]);
|
||||
EXPECT_EQ(4, output.get(0).toGenericDict().at("3").toIntVector()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("5").toIntVector().size());
|
||||
EXPECT_EQ(5, output.get(1).toGenericDict().at("5").toIntVector()[0]);
|
||||
EXPECT_EQ(6, output.get(1).toGenericDict().at("5").toIntVector()[1]);
|
||||
EXPECT_EQ(2, output.get(1).toGenericDict().at("7").toIntVector().size());
|
||||
EXPECT_EQ(7, output.get(1).toGenericDict().at("7").toIntVector()[0]);
|
||||
EXPECT_EQ(8, output.get(1).toGenericDict().at("7").toIntVector()[1]);
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenFallbackKernelWithoutAnyArguments_whenRegistered_thenCanBeCalled) {
|
||||
|
@ -156,10 +156,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithTensorListOutput
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), dummyTensor(DispatchKey::CUDATensorId), dummyTensor(DispatchKey::CPUTensorId));
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorListRef()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorListRef()[2]));
|
||||
EXPECT_EQ(3, result[0].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensorVector()[1]));
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[0].toTensorVector()[2]));
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_whenRegistered_thenCanBeCalled) {
|
||||
@ -172,10 +172,10 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_wh
|
||||
|
||||
auto result = callOp(*op, dummyTensor(DispatchKey::CPUTensorId), 2, 4, 6);
|
||||
EXPECT_EQ(1, result.size());
|
||||
EXPECT_EQ(3, result[0].toIntListRef().size());
|
||||
EXPECT_EQ(2, result[0].toIntListRef()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntListRef()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntListRef()[2]);
|
||||
EXPECT_EQ(3, result[0].toIntVector().size());
|
||||
EXPECT_EQ(2, result[0].toIntVector()[0]);
|
||||
EXPECT_EQ(4, result[0].toIntVector()[1]);
|
||||
EXPECT_EQ(6, result[0].toIntVector()[2]);
|
||||
}
|
||||
|
||||
TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
|
||||
@ -201,9 +201,9 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_
|
||||
EXPECT_EQ(5, result.size());
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[0].toTensor()));
|
||||
EXPECT_EQ(5, result[1].toInt());
|
||||
EXPECT_EQ(2, result[2].toTensorListRef().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorListRef()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorListRef()[1]));
|
||||
EXPECT_EQ(2, result[2].toTensorVector().size());
|
||||
EXPECT_EQ(DispatchKey::CPUTensorId, extractDispatchKey(result[2].toTensorVector()[0]));
|
||||
EXPECT_EQ(DispatchKey::CUDATensorId, extractDispatchKey(result[2].toTensorVector()[1]));
|
||||
EXPECT_EQ(0, result[3].toInt());
|
||||
auto result_dict = c10::impl::toTypedDict<string, Tensor>(result[4].toGenericDict());
|
||||
EXPECT_EQ(2, result_dict.size());
|
||||
|
@@ -44,7 +44,7 @@ TypePtr IValue::type() const {
 return DictType::create(d.keyType(), d.valueType());
 }
 case Tag::GenericList:
-return ListType::create(toGenericList().elementType());
+return ListType::create(toList().elementType());
 case Tag::Future:
 return toFuture()->type();
 case Tag::Device:

@@ -88,10 +88,10 @@ std::ostream& printMaybeAnnotatedList(
 std::ostream& out,
 const IValue& the_list,
 IValueFormatter formatter) {
-if (the_list.toGenericListRef().size() == 0) {
+if (the_list.toListRef().size() == 0) {
 out << "annotate(" << the_list.type()->python_str() << ", [])";
 } else {
-return printList(out, the_list.toGenericListRef(), "[", "]", formatter);
+return printList(out, the_list.toListRef(), "[", "]", formatter);
 }
 return out;
 }

@@ -220,7 +220,7 @@ std::ostream& operator<<(std::ostream & out, const IValue & v) {
 case IValue::Tag::Capsule:
 return out << "Capsule";
 case IValue::Tag::GenericList:
-return printList(out, v.toGenericList(), "[", "]", formatter);
+return printList(out, v.toList(), "[", "]", formatter);
 case IValue::Tag::Future:
 return out << "Future";
 case IValue::Tag::Uninitialized:
@@ -273,7 +273,7 @@ struct CAFFE2_API IValue final {
 bool isIntList() const;
 c10::List<int64_t> toIntList() &&;
 c10::List<int64_t> toIntList() const &;
-std::vector<int64_t> toIntListRef() const;
+std::vector<int64_t> toIntVector() const;

 // ConstantString
 IValue(c10::intrusive_ptr<ivalue::ConstantString> v);

@@ -288,7 +288,7 @@ struct CAFFE2_API IValue final {
 bool isDoubleList() const;
 c10::List<double> toDoubleList() &&;
 c10::List<double> toDoubleList() const &;
-std::vector<double> toDoubleListRef() const;
+std::vector<double> toDoubleVector() const;

 // BoolList
 bool isBoolList() const;

@@ -299,14 +299,14 @@ struct CAFFE2_API IValue final {
 bool isTensorList() const;
 c10::List<at::Tensor> toTensorList() &&;
 c10::List<at::Tensor> toTensorList() const &;
-std::vector<at::Tensor> toTensorListRef() const;
+std::vector<at::Tensor> toTensorVector() const;

 //GenericList
 IValue(c10::List<IValue> v);
-bool isGenericList() const { return Tag::GenericList == tag; }
-c10::List<IValue> toGenericList() &&;
-c10::List<IValue> toGenericList() const &;
-c10::ArrayRef<IValue> toGenericListRef() const;
+bool isList() const { return Tag::GenericList == tag; }
+c10::List<IValue> toList() &&;
+c10::List<IValue> toList() const &;
+c10::ArrayRef<IValue> toListRef() const;

 template<class T>
 IValue(c10::List<T> v);
@@ -446,7 +446,7 @@ DEFINE_TO(c10::List<int64_t>, toIntList)
 DEFINE_TO(c10::List<double>, toDoubleList)
 DEFINE_TO(c10::List<bool>, toBoolList)
 DEFINE_TO(c10::List<at::Tensor>, toTensorList)
-DEFINE_TO(c10::impl::GenericList, toGenericList)
+DEFINE_TO(c10::impl::GenericList, toList)
 DEFINE_TO(c10::impl::GenericDict, toGenericDict)
 DEFINE_TO(c10::intrusive_ptr<ivalue::Tuple>, toTuple)
 DEFINE_TO(std::string, toStringRef)

@@ -508,7 +508,7 @@ template <typename Elem>
 c10::List<Elem> generic_to(
 IValue ivalue,
 _fake_type<c10::List<Elem>>) {
-return impl::toTypedList<Elem>(std::move(ivalue).toGenericList());
+return impl::toTypedList<Elem>(std::move(ivalue).toList());
 }

 template <typename Key, typename Value>

@@ -594,7 +594,7 @@ inline c10::List<int64_t> IValue::toIntList() const & {
 AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
 return c10::List<int64_t>(toIntrusivePtr<c10::detail::ListImpl>());
 }
-inline std::vector<int64_t> IValue::toIntListRef() const {
+inline std::vector<int64_t> IValue::toIntVector() const {
 AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
 return createVectorFromList<int64_t>(static_cast<const c10::detail::ListImpl*>(payload.as_intrusive_ptr));
 }

@@ -606,7 +606,7 @@ inline c10::List<double> IValue::toDoubleList() const & {
 AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
 return c10::List<double>(toIntrusivePtr<c10::detail::ListImpl>());
 }
-inline std::vector<double> IValue::toDoubleListRef() const {
+inline std::vector<double> IValue::toDoubleVector() const {
 AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
 return createVectorFromList<double>(static_cast<const c10::detail::ListImpl*>(payload.as_intrusive_ptr));
 }

@@ -626,20 +626,20 @@ inline c10::List<at::Tensor> IValue::toTensorList() const & {
 AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
 return c10::List<at::Tensor>(toIntrusivePtr<c10::detail::ListImpl>());
 }
-inline std::vector<at::Tensor> IValue::toTensorListRef() const {
+inline std::vector<at::Tensor> IValue::toTensorVector() const {
 AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
 return createVectorFromList<at::Tensor>(static_cast<const c10::detail::ListImpl*>(payload.as_intrusive_ptr));
 }
-inline c10::List<IValue> IValue::toGenericList() && {
-AT_ASSERT(isGenericList(), "Expected GenericList but got ", tagKind());
+inline c10::List<IValue> IValue::toList() && {
+AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
 return c10::List<IValue>(moveToIntrusivePtr<c10::detail::ListImpl>());
 }
-inline c10::List<IValue> IValue::toGenericList() const & {
-AT_ASSERT(isGenericList(), "Expected GenericList but got ", tagKind());
+inline c10::List<IValue> IValue::toList() const & {
+AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
 return c10::List<IValue>(toIntrusivePtr<c10::detail::ListImpl>());
 }
-inline c10::ArrayRef<IValue> IValue::toGenericListRef() const {
-AT_ASSERT(isGenericList(), "Expected GenericList but got ", tagKind());
+inline c10::ArrayRef<IValue> IValue::toListRef() const {
+AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
 return static_cast<const c10::detail::ListImpl*>(payload.as_intrusive_ptr)->list;
 }
 inline c10::Dict<IValue, IValue> IValue::toGenericDict() && {

@@ -689,7 +689,7 @@ inline IValue::IValue(c10::impl::GenericList v)
 }

 template<class T> inline IValue::IValue(c10::List<T> v)
-: IValue(impl::toGenericList<T>(std::move(v))) {}
+: IValue(impl::toList<T>(std::move(v))) {}
 template<class T> inline IValue::IValue(at::ArrayRef<T> v)
 : IValue(c10::List<T>()) {
 auto list = to<c10::List<T>>();
@@ -1816,19 +1816,19 @@ ScalarTypeType() : EnumerationType() {}

 inline bool IValue::isDoubleList() const {
 // note: avoids calling type() to avoid extra referencing counting for the returned type.
-return isGenericList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(FloatType::get());
+return isList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(FloatType::get());
 }

 inline bool IValue::isTensorList() const {
-return isGenericList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(TensorType::get());
+return isList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(TensorType::get());
 }

 inline bool IValue::isIntList() const {
-return isGenericList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(IntType::get());
+return isList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(IntType::get());
 }

 inline bool IValue::isBoolList() const {
-return isGenericList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(BoolType::get());
+return isList() && static_cast<detail::ListImpl*>(payload.as_intrusive_ptr)->elementType->isSubtypeOf(BoolType::get());
 }

 } // namespace c10
@ -1066,7 +1066,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
|
||||
"(bool[] a) -> bool[]");
|
||||
testArgTypes<c10::List<std::string>>::test(
|
||||
c10::List<std::string>(), [] (const c10::List<std::string>& v) {EXPECT_EQ(0, v.size());},
|
||||
c10::List<std::string>(), [] (const IValue& v) {EXPECT_EQ(0, v.toGenericListRef().size());},
|
||||
c10::List<std::string>(), [] (const IValue& v) {EXPECT_EQ(0, v.toListRef().size());},
|
||||
"(str[] a) -> str[]");
|
||||
|
||||
|
||||
@ -1086,9 +1086,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
|
||||
testArgTypes<c10::List<std::string>>::test(
|
||||
c10::List<std::string>({"first", "second"}), [] (const c10::List<std::string>& v) {expectListEquals({"first", "second"}, v);},
|
||||
c10::List<std::string>({"first", "second"}), [] (const IValue& v) {
|
||||
EXPECT_EQ(2, v.toGenericListRef().size());
|
||||
EXPECT_EQ("first", v.toGenericListRef()[0].toStringRef());
|
||||
EXPECT_EQ("second", v.toGenericListRef()[1].toStringRef());
|
||||
EXPECT_EQ(2, v.toListRef().size());
|
||||
EXPECT_EQ("first", v.toListRef()[0].toStringRef());
|
||||
EXPECT_EQ("second", v.toListRef()[1].toStringRef());
|
||||
},
|
||||
"(str[] a) -> str[]");
|
||||
testArgTypes<c10::List<Tensor>>::test(
|
||||
@ -1116,7 +1116,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
|
||||
//Note: vector<bool> is not supported, use List<bool> instead.
|
||||
testArgTypes<std::vector<std::string>>::test<TestLegacyAPI>(
|
||||
std::vector<std::string>(), [] (const std::vector<std::string>& v) {EXPECT_EQ(0, v.size());},
|
||||
std::vector<std::string>(), [] (const IValue& v) {EXPECT_EQ(0, v.toGenericListRef().size());},
|
||||
std::vector<std::string>(), [] (const IValue& v) {EXPECT_EQ(0, v.toListRef().size());},
|
||||
"(str[] a) -> str[]");
|
||||
|
||||
|
||||
@ -1133,9 +1133,9 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
|
||||
testArgTypes<std::vector<std::string>>::test<TestLegacyAPI>(
|
||||
std::vector<std::string>({"first", "second"}), [] (const std::vector<std::string>& v) {expectListEquals({"first", "second"}, v);},
|
||||
std::vector<std::string>({"first", "second"}), [] (const IValue& v) {
|
||||
EXPECT_EQ(2, v.toGenericListRef().size());
|
||||
EXPECT_EQ("first", v.toGenericListRef()[0].toStringRef());
|
||||
EXPECT_EQ("second", v.toGenericListRef()[1].toStringRef());
|
||||
EXPECT_EQ(2, v.toListRef().size());
|
||||
EXPECT_EQ("first", v.toListRef()[0].toStringRef());
|
||||
EXPECT_EQ("second", v.toListRef()[1].toStringRef());
|
||||
},
|
||||
"(str[] a) -> str[]");
|
||||
testArgTypes<std::vector<Tensor>>::test<TestLegacyAPI>(
|
||||
|
@ -99,7 +99,7 @@ compute_input_size_(const std::vector<c10::IValue>& inputs) {
|
||||
// into that list. currently, this means that only tensors from that list
|
||||
// are accessible as inputs. any hypothetical input tensors that come after
|
||||
// the list are not accessible.
|
||||
return inputs[0].toTensorListRef().size();
|
||||
return inputs[0].toTensorVector().size();
|
||||
}
|
||||
// it's not a tensor list. Count the number of tensor inputs and return them.
|
||||
size_t num_tensor_inputs = 0;
|
||||
|
@ -187,7 +187,7 @@ class CAFFE2_API OperatorBase : public Observable<OperatorBase> {
|
||||
// if the first input is a tensor list, we get input tensors by indexing into that list.
|
||||
// currently, this means that only tensors from that list are accessible as inputs.
|
||||
// any hypothetical input tensors that come after the list are not accessible.
|
||||
auto tensorList = newstyle_inputs_[0].toTensorListRef();
|
||||
auto tensorList = newstyle_inputs_[0].toTensorVector();
|
||||
DCHECK_LT((size_t)idx, tensorList.size());
|
||||
ival = tensorList[idx];
|
||||
} else {
|
||||
@ -714,7 +714,7 @@ inline NetDef OperatorBase::GetSingleArgument<NetDef>(
|
||||
template <>
|
||||
inline vector<int> OperatorBase::GetVectorFromIValueList<int>(
|
||||
const c10::IValue& value) const {
|
||||
auto vs = value.toIntListRef();
|
||||
auto vs = value.toIntVector();
|
||||
vector<int> out;
|
||||
out.reserve(vs.size());
|
||||
for (int64_t v : vs) {
|
||||
@ -726,7 +726,7 @@ inline vector<int> OperatorBase::GetVectorFromIValueList<int>(
|
||||
template <>
|
||||
inline vector<float> OperatorBase::GetVectorFromIValueList<float>(
|
||||
const c10::IValue& value) const {
|
||||
const auto& vs = value.toDoubleListRef();
|
||||
const auto& vs = value.toDoubleVector();
|
||||
vector<float> out;
|
||||
out.reserve(vs.size());
|
||||
for (double v : vs) {
|
||||
|
@ -25,7 +25,7 @@ void testIValue() {
|
||||
ASSERT_TRUE(foo2.isDouble());
|
||||
ASSERT_EQ(foo2.toDouble(), 4.0);
|
||||
ASSERT_EQ(foo.use_count(), 2);
|
||||
ASSERT_TRUE(baz.toIntListRef() == std::vector<int64_t>({3, 4, 5}));
|
||||
ASSERT_TRUE(baz.toIntVector() == std::vector<int64_t>({3, 4, 5}));
|
||||
|
||||
auto move_it = std::move(baz).toIntList();
|
||||
ASSERT_EQ(foo.use_count(), 2);
|
||||
@ -35,11 +35,11 @@ void testIValue() {
|
||||
ASSERT_EQ(i.toInt(), 4);
|
||||
IValue dlist(c10::List<double>({3.5}));
|
||||
ASSERT_TRUE(dlist.isDoubleList());
|
||||
ASSERT_TRUE(dlist.toDoubleListRef() == std::vector<double>({3.5}));
|
||||
ASSERT_TRUE(dlist.toDoubleVector() == std::vector<double>({3.5}));
|
||||
std::move(dlist).toDoubleList();
|
||||
ASSERT_TRUE(dlist.isNone());
|
||||
dlist = IValue(c10::List<double>({3.4}));
|
||||
ASSERT_TRUE(dlist.toDoubleListRef() == std::vector<double>({3.4}));
|
||||
ASSERT_TRUE(dlist.toDoubleVector() == std::vector<double>({3.4}));
|
||||
IValue the_list(
|
||||
at::ivalue::Tuple::create({IValue(3.4), IValue(4), IValue(foo)}));
|
||||
ASSERT_EQ(foo.use_count(), 3);
|
||||
|
@ -43,7 +43,7 @@ bool check_ivalue_equality(const c10::IValue& ivalue_python, const c10::IValue&
|
||||
// `IValue` constructor), and here we check that all elements in the `ExpandingArray`
|
||||
// are equal to the Python `int` attribute.
|
||||
if (ivalue_python.isInt() && ivalue_cpp.isIntList()) {
|
||||
auto ivalue_cpp_list = ivalue_cpp.toIntListRef();
|
||||
auto ivalue_cpp_list = ivalue_cpp.toIntVector();
|
||||
std::vector<int64_t> ivalue_python_vec(ivalue_cpp_list.size());
|
||||
std::fill(ivalue_python_vec.begin(), ivalue_python_vec.end(), ivalue_python.toInt());
|
||||
return ivalue_python_vec == ivalue_cpp_list;
|
||||
@ -79,7 +79,7 @@ bool check_ivalue_equality(const c10::IValue& ivalue_python, const c10::IValue&
|
||||
} else if (ivalue_python.isTensor()) {
|
||||
return check_tensor_equality(ivalue_python.toTensor(), ivalue_cpp.toTensor());
|
||||
} else if (ivalue_python.isIntList()) {
|
||||
return ivalue_python.toIntListRef() == ivalue_cpp.toIntListRef();
|
||||
return ivalue_python.toIntVector() == ivalue_cpp.toIntVector();
|
||||
} else if (ivalue_python.isNone()) {
|
||||
return ivalue_cpp.isNone();
|
||||
} else {
|
||||
|
@@ -99,7 +99,7 @@ def jit_type_of(arg):
 FROM_IVALUE = {
 'Device': '{}.toDevice()',
 'Device?': '{}.toOptional<c10::Device>()',
-'IntArrayRef': '{}.toIntListRef()',
+'IntArrayRef': '{}.toIntVector()',
 'Layout': '{}.toLayout()',
 'Layout?': '{}.toOptional<c10::Layout>()',
 'MemoryFormat': '{}.toMemoryFormat()',

@@ -112,7 +112,7 @@ FROM_IVALUE = {
 'Tensor': '{}.toTensor()',
 'Tensor?': 'toOptionalTensor({})',
 'Tensor?[]': 'toListOfOptionalTensor({})',
-'TensorList': '{}.toTensorListRef()',
+'TensorList': '{}.toTensorVector()',
 'bool': '{}.toBool()',
 'bool?': '{}.toOptional<bool>()',
 'double': '{}.toDouble()',
@ -63,7 +63,7 @@ at::Tensor toOptionalTensor(const IValue& v) {
|
||||
// tensor type in interpreter, it should only be used in this file
|
||||
std::vector<Tensor> toListOfOptionalTensor(const IValue& v) {
|
||||
// v is a list of optional tensor, loop over as generic list
|
||||
auto vlist = v.toGenericListRef();
|
||||
auto vlist = v.toListRef();
|
||||
std::vector<Tensor> res;
|
||||
|
||||
for (const IValue &v: vlist) {
|
||||
|
@ -61,12 +61,12 @@ c10::optional<Value*> tryInsertConstant(
|
||||
attr::value, std::vector<int64_t>(bool_list.begin(), bool_list.end()));
|
||||
n->output()->setType(ListType::ofBools());
|
||||
} else if (val.isIntList()) {
|
||||
n->is_(attr::value, val.toIntListRef());
|
||||
n->is_(attr::value, val.toIntVector());
|
||||
n->output()->setType(ListType::ofInts());
|
||||
} else if (val.isTensorList()) {
|
||||
n->ts_(
|
||||
attr::value,
|
||||
fmap(val.toTensorListRef(), [](const at::Tensor& t) {
|
||||
fmap(val.toTensorVector(), [](const at::Tensor& t) {
|
||||
AT_ASSERT(!t.requires_grad());
|
||||
return t;
|
||||
}));
|
||||
|
@ -117,7 +117,7 @@ script::Module ScriptModuleDeserializer::LEGACY_deserialize() {
|
||||
AT_ASSERT(proto_version < 6);
|
||||
if (proto_version == 2) {
|
||||
const auto& list =
|
||||
LEGACY_loadPickleArchive("attributes.pkl").toGenericList();
|
||||
LEGACY_loadPickleArchive("attributes.pkl").toList();
|
||||
LEGACY_pickled_ivalues_.insert(
|
||||
LEGACY_pickled_ivalues_.end(), list.begin(), list.end());
|
||||
} else if (proto_version >= 3) {
|
||||
|
@ -31,11 +31,11 @@ void _convolution_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
(std::move(peek(*stack, 0, 12))).toTensor(),
|
||||
(std::move(peek(*stack, 1, 12))).toTensor(),
|
||||
toOptionalTensor((std::move(peek(*stack, 2, 12)))),
|
||||
(std::move(peek(*stack, 3, 12))).toIntListRef(),
|
||||
(std::move(peek(*stack, 4, 12))).toIntListRef(),
|
||||
(std::move(peek(*stack, 5, 12))).toIntListRef(),
|
||||
(std::move(peek(*stack, 3, 12))).toIntVector(),
|
||||
(std::move(peek(*stack, 4, 12))).toIntVector(),
|
||||
(std::move(peek(*stack, 5, 12))).toIntVector(),
|
||||
(std::move(peek(*stack, 6, 12))).toBool(),
|
||||
(std::move(peek(*stack, 7, 12))).toIntListRef(),
|
||||
(std::move(peek(*stack, 7, 12))).toIntVector(),
|
||||
(std::move(peek(*stack, 8, 12))).toInt(),
|
||||
(std::move(peek(*stack, 9, 12))).toBool(),
|
||||
(std::move(peek(*stack, 10, 12))).toBool(),
|
||||
@ -53,9 +53,9 @@ void conv2d_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
(std::move(peek(*stack, 0, 7))).toTensor(),
|
||||
(std::move(peek(*stack, 1, 7))).toTensor(),
|
||||
toOptionalTensor((std::move(peek(*stack, 2, 7)))),
|
||||
(std::move(peek(*stack, 3, 7))).toIntListRef(),
|
||||
(std::move(peek(*stack, 4, 7))).toIntListRef(),
|
||||
(std::move(peek(*stack, 5, 7))).toIntListRef(),
|
||||
(std::move(peek(*stack, 3, 7))).toIntVector(),
|
||||
(std::move(peek(*stack, 4, 7))).toIntVector(),
|
||||
(std::move(peek(*stack, 5, 7))).toIntVector(),
|
||||
(std::move(peek(*stack, 6, 7))).toInt()
|
||||
);
|
||||
drop(*stack, 7);
|
||||
@ -67,7 +67,7 @@ void view_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
at::AutoNonVariableTypeMode non_var_type_mode(true);
|
||||
#endif
|
||||
auto result_ = ((std::move(peek(*stack, 0, 2))).toTensor()).view(
|
||||
(std::move(peek(*stack, 1, 2))).toIntListRef()
|
||||
(std::move(peek(*stack, 1, 2))).toIntVector()
|
||||
);
|
||||
drop(*stack, 2);
|
||||
pack(*stack, std::move(result_));
|
||||
@ -75,7 +75,7 @@ void view_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
|
||||
void permute_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
auto result_ = ((std::move(peek(*stack, 0, 2))).toTensor()).permute(
|
||||
(std::move(peek(*stack, 1, 2))).toIntListRef()
|
||||
(std::move(peek(*stack, 1, 2))).toIntVector()
|
||||
);
|
||||
drop(*stack, 2);
|
||||
pack(*stack, std::move(result_));
|
||||
@ -83,7 +83,7 @@ void permute_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
|
||||
void cat_kernel(const c10::OperatorHandle& op, Stack* stack) {
|
||||
auto result_ = at::cat(
|
||||
(std::move(peek(*stack, 0, 2))).toTensorListRef(),
|
||||
(std::move(peek(*stack, 0, 2))).toTensorVector(),
|
||||
(std::move(peek(*stack, 1, 2))).toInt()
|
||||
);
|
||||
drop(*stack, 2);
|
||||
|
@ -18,15 +18,15 @@ IValue deepCopy(const IValue& self) {
|
||||
}
|
||||
if (self.isTensorList()) {
|
||||
c10::List<at::Tensor> newList;
|
||||
for (const at::Tensor& oldTensor : self.toTensorListRef()) {
|
||||
for (const at::Tensor& oldTensor : self.toTensorVector()) {
|
||||
newList.push_back(oldTensor.clone(at::MemoryFormat::Preserve));
|
||||
}
|
||||
return newList;
|
||||
}
|
||||
|
||||
// Lists of ivalues should recursively deep copy their contents
|
||||
if (self.isGenericList()) {
|
||||
auto source = std::move(self).toGenericList();
|
||||
if (self.isList()) {
|
||||
auto source = std::move(self).toList();
|
||||
auto newList = c10::impl::GenericList(source.elementType());
|
||||
newList.reserve(source.size());
|
||||
for (const IValue& value : source) {
|
||||
@ -68,7 +68,7 @@ bool deepEquals(const IValue& lhs, const IValue& rhs) {
|
||||
} else if (lhs.isNone() && rhs.isNone()) {
|
||||
return true;
|
||||
} else if (lhs.isIntList() && rhs.isIntList()) {
|
||||
return lhs.toIntListRef() == rhs.toIntListRef();
|
||||
return lhs.toIntVector() == rhs.toIntVector();
|
||||
} else if (lhs.isTensor() && rhs.isTensor()) {
|
||||
return lhs.toTensor().equal(rhs.toTensor());
|
||||
}
|
||||
|
@ -59,21 +59,21 @@ void Pickler::pushIValueImpl(const IValue& ivalue) {
|
||||
} else if (ivalue.isIntList()) {
|
||||
pushSpecializedList(
|
||||
ivalue, "build_intlist", [=](const IValue& ivalue) {
|
||||
for (const int64_t item : ivalue.toIntListRef()) {
|
||||
for (const int64_t item : ivalue.toIntVector()) {
|
||||
pushInt(item);
|
||||
}
|
||||
});
|
||||
} else if (ivalue.isTensorList()) {
|
||||
pushSpecializedList(
|
||||
ivalue, "build_tensorlist", [=](const IValue& ivalue) {
|
||||
for (const at::Tensor& item : ivalue.toTensorListRef()) {
|
||||
for (const at::Tensor& item : ivalue.toTensorVector()) {
|
||||
pushIValue(item);
|
||||
}
|
||||
});
|
||||
} else if (ivalue.isDoubleList()) {
|
||||
pushSpecializedList(
|
||||
ivalue, "build_doublelist", [=](const IValue& ivalue) {
|
||||
for (double item : ivalue.toDoubleListRef()) {
|
||||
for (double item : ivalue.toDoubleVector()) {
|
||||
pushDouble(item);
|
||||
}
|
||||
});
|
||||
@ -84,9 +84,9 @@ void Pickler::pushIValueImpl(const IValue& ivalue) {
|
||||
pushBool(item);
|
||||
}
|
||||
});
|
||||
// note: isGenericList must be after isIntList and friends because
|
||||
// isGenericList is true for all lists.
|
||||
} else if (ivalue.isGenericList()) {
|
||||
// note: isList must be after isIntList and friends because
|
||||
// isList is true for all lists.
|
||||
} else if (ivalue.isList()) {
|
||||
pushGenericList(ivalue);
|
||||
} else if (ivalue.isObject()) {
|
||||
auto obj = ivalue.toObject();
|
||||
@ -472,7 +472,7 @@ size_t Pickler::pushNextBinPut() {
|
||||
}
|
||||
|
||||
void Pickler::pushGenericList(const IValue& ivalue) {
|
||||
auto list = ivalue.toGenericListRef();
|
||||
auto list = ivalue.toListRef();
|
||||
push<PickleOpCode>(PickleOpCode::EMPTY_LIST);
|
||||
|
||||
push<PickleOpCode>(PickleOpCode::MARK);
|
||||
|
@@ -639,8 +639,8 @@ inline py::object toPyObject(IValue ivalue) {
 return py::cast(std::move(ivalue).toBool());
 } else if (ivalue.isString()) {
 return py::cast(std::move(ivalue).toStringRef());
-} else if (ivalue.isGenericList()) {
-auto list = std::move(ivalue).toGenericList();
+} else if (ivalue.isList()) {
+auto list = std::move(ivalue).toList();
 py::list t{list.size()};
 for (size_t i = 0; i < list.size(); ++i) {
 t[i] = toPyObject(IValue{list.get(i)});
@ -67,14 +67,14 @@ Operator createOperatorFromC10(const c10::OperatorHandle& op) {
|
||||
const auto& elem_type = type->expect<ListType>()->getElementType();
|
||||
if (elem_type->isSubtypeOf(TensorType::get())) {
|
||||
AT_ASSERT(iter->isTensorList());
|
||||
auto list = iter->toTensorListRef();
|
||||
auto list = iter->toTensorVector();
|
||||
tracer::addInputs(node, args[i].name().c_str(), list);
|
||||
} else if (elem_type->kind() == TypeKind::FloatType) {
|
||||
AT_ASSERT(iter->isDoubleList());
|
||||
// NB: now, tracer doesn't support tracing double list. We add special
|
||||
// handling here, since in our case, we assume that all the doubles
|
||||
// in the list are constants
|
||||
auto value = iter->toDoubleListRef();
|
||||
auto value = iter->toDoubleVector();
|
||||
std::vector<Value*> info(value.size());
|
||||
for (size_t value_index = 0; value_index < value.size(); ++value_index) {
|
||||
info[value_index] = graph->insertConstant(value[value_index]);
|
||||
@ -85,7 +85,7 @@ Operator createOperatorFromC10(const c10::OperatorHandle& op) {
|
||||
} else if (elem_type->kind() == TypeKind::IntType) {
|
||||
AT_ASSERT(iter->isIntList());
|
||||
tracer::addInputs(
|
||||
node, args[i].name().c_str(), iter->toIntListRef());
|
||||
node, args[i].name().c_str(), iter->toIntVector());
|
||||
} else if (elem_type->kind() == TypeKind::BoolType) {
|
||||
AT_ASSERT(iter->isBoolList());
|
||||
tracer::addInputs(
|
||||
|
@ -633,7 +633,7 @@ RegisterOperators reg(
std::vector<torch::autograd::Variable> gradients;

if (!grad_outputs.isNone()) {
for (const IValue& v : grad_outputs.toGenericListRef()) {
for (const IValue& v : grad_outputs.toListRef()) {
gradients.emplace_back(v.isNone() ? at::Tensor() : v.toTensor());
}
}
@ -670,7 +670,7 @@ RegisterOperators reg(
std::vector<torch::autograd::Variable> gradients;

if (!grad_tensors.isNone()) {
for (const IValue& v : grad_tensors.toGenericListRef()) {
for (const IValue& v : grad_tensors.toListRef()) {
gradients.emplace_back(v.isNone() ? at::Tensor() : v.toTensor());
}
}
@ -757,7 +757,7 @@ RegisterOperators reg(
size.reserve(8);
for (size_t i = 0; i < num_inputs; ++i) {
size = at::infer_size(
size, peek(stack, i, num_inputs).toIntListRef());
size, peek(stack, i, num_inputs).toIntVector());
}
drop(stack, num_inputs);
push(stack, IValue(std::move(size)));
@ -904,7 +904,7 @@ RegisterOperators reg(
break;
}
} else if (v.isTensorList()) {
for (const at::Tensor& t : v.toTensorListRef()) {
for (const at::Tensor& t : v.toTensorVector()) {
if (t.defined()) {
result = true;
}
@ -953,7 +953,7 @@ RegisterOperators reg(
} else {
push(
stack,
at::sum_to(self.toTensor(), size.toIntListRef()));
at::sum_to(self.toTensor(), size.toIntVector()));
}
return 0;
},
@ -963,8 +963,8 @@ RegisterOperators reg(
[](Stack& stack) {
IValue self_size, other_size;
pop(stack, self_size, other_size);
auto s = self_size.toIntListRef();
auto o = other_size.toIntListRef();
auto s = self_size.toIntVector();
auto o = other_size.toIntVector();
if (s == o) {
push(stack, IValue());
} else {
@ -1121,7 +1121,7 @@ RegisterOperators reg(
};
} else {
return [=](Stack& stack) {
auto list = pop(stack).toGenericList();
auto list = pop(stack).toList();
TORCH_CHECK(
list.size() == num_outputs,
"Expected ",
@ -2303,7 +2303,7 @@ Operation dictConstructFromList(const Node* node) {
static_cast<const DictType*>(output_type.get())->getValueType();
return [key_type, value_type](Stack& stack) {
auto input_list = pop(stack);
auto list_ref = input_list.toGenericListRef();
auto list_ref = input_list.toListRef();
auto dict = c10::impl::GenericDict(key_type, value_type);
dict.reserve(list_ref.size());
for (const auto& input : list_ref) {
@ -3269,7 +3269,7 @@ Operation sort_op(
bool copy_return_list) {
return [lt_func, has_reverse_arg, copy_return_list](Stack& stack) {
bool reverse = has_reverse_arg ? pop(stack).toBool() : false;
auto g_list = pop(stack).toGenericList();
auto g_list = pop(stack).toList();
if (copy_return_list) {
g_list = g_list.copy();
}
@ -3336,14 +3336,14 @@ std::vector<int64_t> _output_size(
std::vector<int64_t> repeated(dim, size.toInt());
return repeated;
} else {
return size.toIntListRef();
return size.toIntVector();
}
}
std::vector<double> scale_repeated;
if (scale_factors.isDouble()) {
scale_repeated = std::vector<double>(dim, scale_factors.toDouble());
} else {
scale_repeated = scale_factors.toDoubleListRef();
scale_repeated = scale_factors.toDoubleVector();
}
std::vector<int64_t> ret;
for (size_t i = 0; i < dim; ++i) {
@ -3526,7 +3526,7 @@ IValue convert_scale_factor_to_double(const IValue& int_ivalue) {
if (int_ivalue.isInt()) {
scale_factor_double = static_cast<double>(int_ivalue.toInt());
} else if (int_ivalue.isIntList()) {
auto int_list = int_ivalue.toIntListRef();
auto int_list = int_ivalue.toIntVector();
std::vector<double> double_vec(int_list.begin(), int_list.end());
scale_factor_double = double_vec;
} else if (int_ivalue.isNone()) {
@ -66,13 +66,13 @@ at::Tensor castTensorTo(

std::vector<int64_t> compute_sizes(const IValue& seq) {
std::vector<int64_t> sizes;
auto seq_recur = seq.toGenericList();
auto seq_recur = seq.toList();
while (true) {
sizes.push_back(seq_recur.size());
if (seq_recur.size() == 0 || !seq_recur.get(0).isGenericList()) {
if (seq_recur.size() == 0 || !seq_recur.get(0).isList()) {
break;
}
seq_recur = seq_recur.get(0).toGenericList();
seq_recur = seq_recur.get(0).toList();
}
return sizes;
}
@ -117,7 +117,7 @@ void recursiveStore(
const IValue& obj) {
auto ndim = sizes.size();
auto n = sizes[dim];
auto seq = obj.toGenericListRef();
auto seq = obj.toListRef();
checkSequenceSize(n, dim, seq.size());
if (dim + 1 < static_cast<long>(ndim)) {
for (int64_t i = 0; i < n; i++) {
@ -200,7 +200,7 @@ RegisterOperators reg({

auto result = at::split_with_sizes(
(std::move(peek(stack, 0, 3))).toTensor(),
(std::move(peek(stack, 1, 3))).toIntListRef(),
(std::move(peek(stack, 1, 3))).toIntVector(),
(std::move(peek(stack, 2, 3))).toInt());
drop(stack, 3);
pack(stack, std::move(result));
@ -227,7 +227,7 @@ RegisterOperators reg({
RECORD_FUNCTION("sizes", last(stack, 2));

auto list = peek(stack, 0, 2).toIntList().copy();
auto defaults = peek(stack, 1, 2).toIntListRef();
auto defaults = peek(stack, 1, 2).toIntVector();
drop(stack, 2);

AT_ASSERT(defaults.size() > list.size());
@ -244,7 +244,7 @@ RegisterOperators reg({
[](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, at::infer_size(a.toIntListRef(), b.toIntListRef()));
push(stack, at::infer_size(a.toIntVector(), b.toIntVector()));
return 0;
},
aliasAnalysisFromSchema()),
@ -325,7 +325,7 @@ RegisterOperators reg({
[](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, at::infer_size(a.toIntListRef(), b.toIntListRef()));
push(stack, at::infer_size(a.toIntVector(), b.toIntVector()));
return 0;
},
aliasAnalysisFromSchema()),
@ -554,9 +554,9 @@ bool ivalue_tags_match(const Module& lhs, const Module& rhs) {
for (size_t i = 0; i < at->elements().size(); ++i) {
work.emplace_back(Work{at->elements().at(i), bt->elements().at(i)});
}
} else if (item.a.isGenericList()) {
auto al = item.a.toGenericList();
auto bl = item.b.toGenericList();
} else if (item.a.isList()) {
auto al = item.a.toList();
auto bl = item.b.toList();
for (size_t i = 0; i < al.size(); ++i) {
work.emplace_back(Work{al.get(i), bl.get(i)});
}
@ -93,7 +93,7 @@ Value* TracingState::getValue(const IValue& var) {
->insertNode(graph->createList(
TensorType::get(),
fmap(
var.toTensorListRef(),
var.toTensorVector(),
[&](const IValue& val) { return getValue(val); })))
->output();
} else if (var.isTuple()) {
@ -205,7 +205,7 @@ Value* TracingState::getOutput(const IValue& iv, size_t i) {
->insertNode(graph->createList(
TensorType::get(),
fmap(
iv.toTensorListRef(),
iv.toTensorVector(),
[&](const IValue& ival) { return getOutput(ival, i); })))
->output();
} else if (iv.isTuple()) {
@ -265,8 +265,8 @@ static IValue addInput(const std::shared_ptr<TracingState> & state, const IValue

return std::move(dict);
} else if (auto list_type = type->cast<ListType>()) {
size_t num_elems = input.isGenericList() ? input.toGenericListRef().size()
: input.toTensorListRef().size();
size_t num_elems = input.isList() ? input.toListRef().size()
: input.toTensorVector().size();
auto list_unpack = state->graph->insertNode(state->graph->createListUnpack(value, num_elems));
auto unpack_outputs = list_unpack->outputs();

@ -277,7 +277,7 @@ static IValue addInput(const std::shared_ptr<TracingState> & state, const IValue
}
return elems;
} else {
auto elems = input.toGenericList();
auto elems = input.toList();
for (size_t i = 0; i < num_elems; i++) {
elems[i] = addInput(state, elems.get(i), list_type->getElementType(), unpack_outputs[i]);
}
@ -401,8 +401,8 @@ void TracingState::setValue(const IValue& v, Value* value) {
for (size_t i = 0; i < outputs.size(); ++i) {
setValue(outputs[i], unpack_node->outputs()[i]);
}
} else if (v.isGenericList()) {
auto elements = v.toGenericListRef();
} else if (v.isList()) {
auto elements = v.toListRef();
Node* unpack_node =
graph->insertNode(graph->createListUnpack(value, elements.size()));
for (size_t i = 0; i < elements.size(); ++i) {
@ -100,11 +100,11 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) {
case ListType::Kind: {
// specialized lists do not need their type refined, so we can exit
// early here
if (!w.value.isGenericList()) {
if (!w.value.isList()) {
break;
}
auto elem_type = w.static_type->cast<ListType>()->getElementType();
auto lst = w.value.toGenericList();
auto lst = w.value.toList();
lst.unsafeSetElementType(elem_type);
for (const IValue& item : lst) {
Work elem = {elem_type, item};
@ -217,7 +217,7 @@ static std::vector<int64_t> tupleToIntList(const IValue& v) {
template <typename T>
static std::vector<T> convertList(const IValue& v) {
return fmap(
v.toGenericListRef(), [](const IValue& elem) { return elem.to<T>(); });
v.toListRef(), [](const IValue& elem) { return elem.to<T>(); });
}

PickleOpCode Unpickler::readInstruction() {
@ -438,7 +438,7 @@ void Unpickler::readGlobal(
});
} else if (class_name == "IntList") {
globals_.emplace_back([this] {
stack_.back().toGenericList().unsafeSetElementType(IntType::get());
stack_.back().toList().unsafeSetElementType(IntType::get());
});
} else {
AT_ERROR("Unknown pickler class id", class_name);
@ -471,7 +471,7 @@ void Unpickler::readGlobal(
// Unpickle a list specialization (e.g. List[Tensor], List[int], ...)
globals_.emplace_back([this, elem_type] {
// Pop reduce arg off the stack
auto data = stack_.back().toTuple()->elements().at(0).toGenericList();
auto data = stack_.back().toTuple()->elements().at(0).toList();
stack_.pop_back();
data.unsafeSetElementType(elem_type);
stack_.emplace_back(std::move(data));
@ -685,8 +685,8 @@ void Unpickler::readList(IValue list_ivalue) {
for (const auto& elem : elements) {
list.push_back(elem.toBool());
}
} else if (list_ivalue.isGenericList()) {
auto list = std::move(list_ivalue).toGenericList();
} else if (list_ivalue.isList()) {
auto list = std::move(list_ivalue).toList();
list.reserve(num_elements);
for (const auto& elem : elements) {
list.emplace_back(elem);
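
Below is a minimal sketch of caller code written against the renamed accessors (a hypothetical example for illustration only, not part of this commit; it assumes the post-rename IValue API summarized above):

// Hypothetical caller, shown only to illustrate the renamed accessors:
// isGenericList() -> isList(), toGenericListRef() -> toListRef(),
// toIntListRef() -> toIntVector().
#include <ATen/core/ivalue.h>

int64_t sumIfIntList(const c10::IValue& value) {
  int64_t total = 0;
  if (value.isIntList()) {
    // toIntVector() copies the elements into a std::vector<int64_t>.
    for (int64_t x : value.toIntVector()) {
      total += x;
    }
  } else if (value.isList()) {
    // toListRef() exposes the stored elements as IValues without copying.
    for (const c10::IValue& elem : value.toListRef()) {
      total += elem.isInt() ? elem.toInt() : 0;
    }
  }
  return total;
}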