feat(//core/): Add support for Split converter and unpack evaluator #283
Conversation
There are some changes that do not conform to C++ style guidelines:
diff --git a/workspace/core/conversion/conversion.cpp b/tmp/changes.txt
index fe3815a..d5d8d01 100644
--- a/workspace/core/conversion/conversion.cpp
+++ b/tmp/changes.txt
@@ -175,45 +175,46 @@ void AddInputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> inputs
}
void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
-
for (auto out : outputs) {
auto it = ctx->value_tensor_map.find(out);
- if (it == ctx->value_tensor_map.end()){
- if (ctx->evaluated_value_map.find(out) != ctx->evaluated_value_map.end()){
+ if (it == ctx->value_tensor_map.end()) {
+ if (ctx->evaluated_value_map.find(out) != ctx->evaluated_value_map.end()) {
auto out_ivalue = ctx->evaluated_value_map[out];
- if (out_ivalue.isList()){
+ if (out_ivalue.isList()) {
auto output_list = out_ivalue.toList();
LOG_DEBUG("One of the outputs is a TensorList. output_list size: " << output_list.size());
- for (int i=0; i<output_list.size(); i++){
+ for (int i = 0; i < output_list.size(); i++) {
std::string name = std::string("output_") + std::to_string(ctx->num_outputs);
auto output_container = output_list.get(i).toCustomClass<TensorContainer>();
nvinfer1::ITensor* out_tensor = output_container.get()->tensor();
out_tensor->setName(name.c_str());
ctx->net->markOutput(*out_tensor);
- LOG_INFO(ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
+ LOG_INFO(
+ ctx->logger,
+ "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
ctx->num_outputs += 1;
}
- } else if(out_ivalue.isCustomClass()){
+ } else if (out_ivalue.isCustomClass()) {
std::string name = std::string("output_") + std::to_string(ctx->num_outputs);
auto output_container = out_ivalue.toCustomClass<TensorContainer>();
nvinfer1::ITensor* out_tensor = output_container.get()->tensor();
out_tensor->setName(name.c_str());
ctx->net->markOutput(*out_tensor);
- LOG_INFO(ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
+ LOG_INFO(
+ ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
ctx->num_outputs += 1;
- }
- else{
+ } else {
TRTORCH_THROW_ERROR("Unknown output type. Only a single tensor or a TensorList type is supported.");
}
}
- }
- else{
+ } else {
std::string name = std::string("output_") + std::to_string(ctx->num_outputs);
auto out_tensor = it->second;
out_tensor->setName(name.c_str());
ctx->net->markOutput(*out_tensor);
- LOG_INFO(ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
+ LOG_INFO(
+ ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
ctx->num_outputs += 1;
}
}
@@ -368,18 +369,22 @@ void ConvertBlockToNetDef(
} else if (to_eval) {
auto eval = EvaluateNode(ctx, n);
if (eval) {
- if (n->outputs().size() > 1){ // For ListUnpack scenario
- if (eval.value().isList()){
- LOG_DEBUG(ctx->logger, "Found the evaluated value to be a list" << eval.value() << " for node: " << util::node_info(n));
+ if (n->outputs().size() > 1) { // For ListUnpack scenario
+ if (eval.value().isList()) {
+ LOG_DEBUG(
+ ctx->logger,
+ "Found the evaluated value to be a list" << eval.value() << " for node: " << util::node_info(n));
auto eval_list = eval.value().toList();
- TRTORCH_CHECK(eval_list.size() == n->outputs().size(), "Size of evaluated results: " << eval_list.size() << " and node outputs size: " << n->outputs().size() << " must match.");
- for (int i=0; i<eval_list.size(); i++){
+ TRTORCH_CHECK(
+ eval_list.size() == n->outputs().size(),
+ "Size of evaluated results: " << eval_list.size() << " and node outputs size: " << n->outputs().size()
+ << " must match.");
+ for (int i = 0; i < eval_list.size(); i++) {
auto eval_output = eval_list.get(i);
ctx->AssociateValueAndIValue(n->output(i), eval_output);
}
}
- }
- else if (!eval.value().isTensor()) {
+ } else if (!eval.value().isTensor()) {
LOG_DEBUG(ctx->logger, "Found the value to be: " << eval.value());
ctx->AssociateValueAndIValue(n->output(0), eval.value());
} else {
diff --git a/workspace/core/conversion/converters/impl/select.cpp b/tmp/changes.txt
index bc79456..cea2f5f 100644
--- a/workspace/core/conversion/converters/impl/select.cpp
+++ b/tmp/changes.txt
@@ -1,14 +1,14 @@
-#include "NvInfer.h"
-#include "core/conversion/converters/converters.h"
-#include "core/util/prelude.h"
-#include "torch/torch.h"
-#include "torch/csrc/jit/ir/ir.h"
+#include <ATen/ATen.h>
+#include <vector>
#include "ATen/core/ivalue.h"
#include "ATen/core/stack.h"
+#include "NvInfer.h"
#include "c10/util/intrusive_ptr.h"
+#include "core/conversion/converters/converters.h"
#include "core/conversion/tensorcontainer/TensorContainer.h"
-#include <ATen/ATen.h>
-#include <vector>
+#include "core/util/prelude.h"
+#include "torch/csrc/jit/ir/ir.h"
+#include "torch/torch.h"
namespace trtorch {
namespace core {
@@ -18,22 +18,21 @@ namespace impl {
namespace {
bool add_split(ConversionCtx* ctx, const torch::jit::Node* n, args& args, bool split_list) {
-
auto in = args[0].ITensor();
auto axis = args[2].unwrapToInt();
auto inDimSize = in->getDimensions().d[axis];
auto numOutputs = 1;
std::vector<int64_t> sizes;
- if (split_list){
+ if (split_list) {
sizes = args[1].unwrapToIntList().vec();
numOutputs = sizes.size();
- } else{
+ } else {
auto split_size = args[1].unwrapToInt();
numOutputs = inDimSize / split_size;
- if (numOutputs == 1){
+ if (numOutputs == 1) {
sizes.push_back(split_size);
- }else {
+ } else {
sizes = std::vector<int64_t>(numOutputs, 1);
}
}
@@ -46,8 +45,8 @@ bool add_split(ConversionCtx* ctx, const torch::jit::Node* n, args& args, bool s
list.reserve(numOutputs);
int start_idx = 0;
- for (int i=0; i<numOutputs; i++){
- at::Tensor indices = torch::arange(start_idx, start_idx+sizes[i], 1).to(torch::kI32);
+ for (int i = 0; i < numOutputs; i++) {
+ at::Tensor indices = torch::arange(start_idx, start_idx + sizes[i], 1).to(torch::kI32);
auto indicesTensor = tensor_to_const(ctx, indices);
auto gather_layer = ctx->net->addGather(*in, *indicesTensor, axis);
@@ -63,7 +62,6 @@ bool add_split(ConversionCtx* ctx, const torch::jit::Node* n, args& args, bool s
auto split_output_ivalue = std::move(torch::jit::IValue(list));
auto out = ctx->AssociateValueAndIValue(n->outputs()[0], split_output_ivalue);
-
}
auto select_registrations TRTORCH_UNUSED =
@@ -226,30 +224,24 @@ auto select_registrations TRTORCH_UNUSED =
return true;
}})
- .pattern(
- {"aten::split(Tensor self, int[] split_sizes, int dim=0) -> (Tensor[])",
+ .pattern({"aten::split(Tensor self, int[] split_sizes, int dim=0) -> (Tensor[])",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-
add_split(ctx, n, args, true);
LOG_DEBUG("Converted split op into a list of IValues");
return true;
- }})
- .pattern(
- {"aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> (Tensor[])",
- [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-
- add_split(ctx, n, args, false);
- LOG_DEBUG("Converted split op into a list of IValues");
- return true;
- }})
- .pattern(
- {"aten::split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -> (Tensor[])",
- [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-
- add_split(ctx, n, args, true);
- LOG_DEBUG("Converted split op into a list of IValues");
- return true;
- }});
+ }})
+ .pattern({"aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> (Tensor[])",
+ [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+ add_split(ctx, n, args, false);
+ LOG_DEBUG("Converted split op into a list of IValues");
+ return true;
+ }})
+ .pattern({"aten::split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -> (Tensor[])",
+ [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+ add_split(ctx, n, args, true);
+ LOG_DEBUG("Converted split op into a list of IValues");
+ return true;
+ }});
} // namespace
} // namespace impl
diff --git a/workspace/tests/core/conversion/converters/test_select.cpp b/tmp/changes.txt
index fe7bbdb..c2c66aa 100644
--- a/workspace/tests/core/conversion/converters/test_select.cpp
+++ b/tmp/changes.txt
@@ -228,7 +228,7 @@ TEST(Converters, ATenSplitSizesInScriptingConvertsCorrectly) {
auto trt_in = at::clone(in);
auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in});
- for(int i=0; i < jit_results.size(); i++){
+ for (int i = 0; i < jit_results.size(); i++) {
auto trt = trt_results[i].reshape(jit_results[i].sizes());
ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[i], trt, 2e-6));
}
@@ -256,7 +256,7 @@ TEST(Converters, ATenSplitSizesinTracingConvertsCorrectly) {
auto trt_in = at::clone(in);
auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in});
- for(int i=0; i < jit_results.size(); i++){
+ for (int i = 0; i < jit_results.size(); i++) {
auto trt = trt_results[i].reshape(jit_results[i].sizes());
ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[i], trt, 2e-6));
}
@@ -283,7 +283,7 @@ TEST(Converters, ATenSplitFixedConvertsCorrectly) {
auto trt_in = at::clone(in);
auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in});
- for(int i=0; i < jit_results.size(); i++){
+ for (int i = 0; i < jit_results.size(); i++) {
auto trt = trt_results[i].reshape(jit_results[i].sizes());
ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[i], trt, 2e-6));
}
ERROR: Some files do not conform to style guidelines
Code conforms to Python style guidelines
Signed-off-by: Dheeraj Peri <[email protected]>
One part of issue #267 should be resolved by this PR. I need to look into the other details in the issue.
Code conforms to C++ style guidelines
Code conforms to Python style guidelines
core/conversion/conversion.cpp
Outdated
if (it == ctx->value_tensor_map.end()) {
  if (ctx->evaluated_value_map.find(out) != ctx->evaluated_value_map.end()) {
    auto out_ivalue = ctx->evaluated_value_map[out];
    if (out_ivalue.isList()) {
I still think we can't really handle this output case yet, since we don't have the program synthesis components in compiler.cpp and the RunCudaEngine function to properly return this to the user.
Yes, that's right. Initially I was trying various split graphs, which is when this was needed, before our discussion. I've been testing with trtorchexec and it worked there, but the test cases use a different API to construct/run the graph, so it would be a problem. Removed the code. The split graph test cases include unpack, so they should be fine. We'll look into a better way of handling different output types later. (Related issue opened: https://github.com/NVIDIA/TRTorch/issues/284)
@@ -32,6 +32,12 @@ auto prim_registrations =
    [](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
      return at::scalar_to_tensor(args.at(n->output(0)).IValue()->toScalar());
    }})
    .evaluator({torch::jit::prim::ListUnpack,
Can we do a list unpack test?
Added a test for list unpack.
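For reference, a minimal sketch of what such a test could look like, modeled on the split tests in test_select.cpp visible in the diff above. The test name, graph IR, and input shape are illustrative assumptions, not the exact test added in this PR:

// Illustrative sketch only -- follows the conventions of
// tests/core/conversion/converters/test_select.cpp and assumes the same
// includes and helpers (parseIR, get_named_params, RunGraph, RunGraphEngine).
TEST(Converters, ATenSplitListUnpackConvertsCorrectly) {
  // aten::split produces a Tensor[]; prim::ListUnpack turns that list back
  // into individual outputs, which is the scenario the new evaluator handles.
  const auto graph = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=2]()
      %3 : int = prim::Constant[value=1]()
      %4 : Tensor[] = aten::split(%x.1, %2, %3)
      %x1 : Tensor, %x2 : Tensor = prim::ListUnpack(%4)
      return (%x1, %x2))IR";

  auto g = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(graph, g.get());

  auto in = at::randint(1, 10, {1, 4, 4, 4}, {at::kCUDA});
  auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
  auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});

  auto trt_in = at::clone(in);
  auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in});

  for (size_t i = 0; i < jit_results.size(); i++) {
    auto trt = trt_results[i].reshape(jit_results[i].sizes());
    ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[i], trt, 2e-6));
  }
}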
core/conversion/conversion.cpp
Outdated
if (eval.value().isList()) {
  LOG_DEBUG(
      ctx->logger,
      "Found the evaluated value to be a list" << eval.value() << " for node: " << util::node_info(n));
This is a misleading message, I feel, as this case is when there are multiple outputs from an evaluator. It makes it seem like the object returned is a singular list and not a list of results.
core/conversion/conversion.cpp
Outdated
@@ -337,12 +369,28 @@ void ConvertBlockToNetDef(
    } else if (to_eval) {
      auto eval = EvaluateNode(ctx, n);
      if (eval) {
        if (!eval.value().isTensor()) {
        if (n->outputs().size() > 1) { // For ListUnpack scenario
          if (eval.value().isList()) {
I feel like tuples are the better type to use here. Functionally they operate the same, but cognitively they are more in line with how people think about multiple returns from functions.
<< " must match."); | ||
for (int i = 0; i < eval_list.size(); i++) { | ||
auto eval_output = eval_list.get(i); | ||
ctx->AssociateValueAndIValue(n->output(i), eval_output); |
We should move the debug statement from above to here and list out all the found results.
Done
core/conversion/evaluators/prim.cpp
Outdated
[](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
  // Outputs is an IValue which has list of tensors which can be found in ctx->evaluated_value_map
  const torch::jit::IValue* outputs = args.at(n->input()).IValue();
  return *std::move(outputs);
You might be able to use something like this to quickly switch to a tuple: https://github.com/pytorch/pytorch/blob/22a34bcf4e5eaa348f0117c414c3dd760ec64b13/test/cpp/api/jit.cpp#L91
Done
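For the record, a rough sketch of what the tuple-based evaluator ends up looking like; this assumes the evaluator's single input is the packed Tensor[] IValue (the lambda signature comes from the registration quoted above, and c10::ivalue::Tuple::create is the helper used in the linked PyTorch test):

.evaluator({torch::jit::prim::ListUnpack,
            [](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
              // The single input holds the Tensor[] produced upstream (e.g. by aten::split).
              const torch::jit::IValue* outputs = args.at(n->input()).IValue();
              // Repack the list elements as a tuple so each element can be
              // associated with the corresponding node output downstream.
              return c10::ivalue::Tuple::create(outputs->toList().vec());
            }})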
Signed-off-by: Dheeraj Peri <[email protected]>
There are some changes that do not conform to C++ style guidelines:
diff --git a/workspace/core/conversion/conversion.cpp b/tmp/changes.txt
index e5f1e94..cd1f0ad 100644
--- a/workspace/core/conversion/conversion.cpp
+++ b/tmp/changes.txt
@@ -374,8 +374,8 @@ void ConvertBlockToNetDef(
auto eval_list = eval.value().toTuple();
TRTORCH_CHECK(
eval_list->elements().size() == n->outputs().size(),
- "Size of evaluated results: " << eval_list->elements().size() << " and node outputs size: " << n->outputs().size()
- << " must match.");
+ "Size of evaluated results: " << eval_list->elements().size()
+ << " and node outputs size: " << n->outputs().size() << " must match.");
for (int i = 0; i < eval_list->elements().size(); i++) {
auto eval_output = eval_list.get()->elements()[i];
LOG_DEBUG(
@@ -384,8 +384,7 @@ void ConvertBlockToNetDef(
ctx->AssociateValueAndIValue(n->output(i), eval_output);
}
} else {
- TRTORCH_THROW_ERROR(
- "Unsupported return type for evaluated node");
+ TRTORCH_THROW_ERROR("Unsupported return type for evaluated node");
}
} else if (!eval.value().isTensor()) {
LOG_DEBUG(ctx->logger, "Found the value to be: " << eval.value());
diff --git a/workspace/tests/util/evaluate_graph.cpp b/tmp/changes.txt
index 814c077..65e4449 100644
--- a/workspace/tests/util/evaluate_graph.cpp
+++ b/tmp/changes.txt
@@ -5,8 +5,8 @@
#include "core/conversion/converters/converters.h"
#include "core/conversion/evaluators/evaluators.h"
#include "core/conversion/var/Var.h"
-#include "core/util/prelude.h"
#include "core/util/jit_util.h"
+#include "core/util/prelude.h"
namespace trtorch {
namespace tests {
@@ -30,17 +30,18 @@ std::vector<torch::jit::IValue> EvaluateGraph(const torch::jit::Block* b, std::v
if (eval) {
if (eval.value().isTuple()) {
auto eval_list = eval.value().toTuple();
- for (int i = 0; i < eval_list->elements().size(); i++){
+ for (int i = 0; i < eval_list->elements().size(); i++) {
auto eval_output = eval_list.get()->elements()[i];
LOG_DEBUG(
ctx->logger,
- "Found the evaluated value(s) to be " << eval_output << " for node: " << trtorch::core::util::node_info(n));
+ "Found the evaluated value(s) to be " << eval_output
+ << " for node: " << trtorch::core::util::node_info(n));
ctx->AssociateValueAndIValue(n->output(i), eval_output);
}
- } else if(!eval.value().isTensor()){
+ } else if (!eval.value().isTensor()) {
LOG_DEBUG("Found the value to be: " << eval.value());
ctx->AssociateValueAndIValue(n->output(0), eval.value());
- }else {
+ } else {
LOG_DEBUG("Found the value to be a tensor (shape " << eval.value().toTensor().sizes() << ')');
ctx->AssociateValueAndIValue(n->output(0), eval.value());
}
ERROR: Some files do not conform to style guidelines
Code conforms to Python style guidelines
Signed-off-by: Dheeraj Peri <[email protected]>
Code conforms to C++ style guidelines
Code conforms to Python style guidelines
LGTM
Description
Support aten::split and prim::ListUnpack
Fixes #267 and #259
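As a quick illustration (LibTorch C++ rather than TorchScript IR; the function below is hypothetical), this is the kind of pattern that now converts:

#include <torch/torch.h>
#include <tuple>

// torch::split lowers to aten::split.Tensor in TorchScript, and unpacking
// its Tensor[] result corresponds to prim::ListUnpack -- the two ops this
// PR adds support for.
std::tuple<at::Tensor, at::Tensor> split_example(const at::Tensor& x) {
  auto parts = torch::split(x, /*split_size=*/2, /*dim=*/1);
  return {parts[0], parts[1]};
}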