From 1778011b40f3858b03684a54b1492ffc86848810 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Fri, 22 Apr 2022 23:37:39 -0700
Subject: [PATCH 1/8] Added bitwise_not test

---
 .../conversion/converters/test_bitwise.cpp    | 24 +++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 tests/core/conversion/converters/test_bitwise.cpp

diff --git a/tests/core/conversion/converters/test_bitwise.cpp b/tests/core/conversion/converters/test_bitwise.cpp
new file mode 100644
index 0000000000..b4ac0dc280
--- /dev/null
+++ b/tests/core/conversion/converters/test_bitwise.cpp
@@ -0,0 +1,24 @@
+#include "core/compiler.h"
+#include "gtest/gtest.h"
+#include "tests/util/util.h"
+#include "torch/csrc/jit/ir/irparser.h"
+
+TEST(Converters, ATenBitwiseNotConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%0 : Tensor):
+        %3 : Tensor = aten::bitwise_not(%0)
+        return (%3))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(-128, 128, {10}, {at::kCUDA});
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0]));
+}
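For context, the JIT baseline that `RunGraph` reproduces is plain ATen `bitwise_not`. A minimal standalone LibTorch sketch (not part of the patch) of the semantics the test pins down:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // bitwise_not flips every bit of each element, e.g. ~0 == -1.
  auto in = torch::tensor({0, 1, -1, 127}, torch::kInt);
  std::cout << torch::bitwise_not(in) << std::endl; // -1, -2, 0, -128
}
```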
From d6645d3173c7f84d14dd967f266dce58983fb946 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Fri, 6 May 2022 14:36:22 -0700
Subject: [PATCH 2/8] added bitwise_not converter

---
 core/conversion/converters/BUILD              |  1 +
 core/conversion/converters/impl/bitwise.cpp   | 52 +++++++++++++++++++
 tests/core/conversion/converters/BUILD        |  5 ++
 .../conversion/converters/test_bitwise.cpp    | 47 +++++++++++------
 4 files changed, 89 insertions(+), 16 deletions(-)
 create mode 100644 core/conversion/converters/impl/bitwise.cpp

diff --git a/core/conversion/converters/BUILD b/core/conversion/converters/BUILD
index f9948eea7d..ff28a4a892 100755
--- a/core/conversion/converters/BUILD
+++ b/core/conversion/converters/BUILD
@@ -54,6 +54,7 @@ cc_library(
         "NodeConverterRegistry.cpp",
         "impl/activation.cpp",
         "impl/batch_norm.cpp",
+        "impl/bitwise.cpp",
         "impl/cast.cpp",
         "impl/concat.cpp",
         "impl/constant.cpp",
diff --git a/core/conversion/converters/impl/bitwise.cpp b/core/conversion/converters/impl/bitwise.cpp
new file mode 100644
index 0000000000..0d12f71575
--- /dev/null
+++ b/core/conversion/converters/impl/bitwise.cpp
@@ -0,0 +1,52 @@
+#include "core/conversion/converters/converters.h"
+#include "core/util/prelude.h"
+
+#include <torch/torch.h>
+
+namespace torch_tensorrt {
+namespace core {
+namespace conversion {
+namespace converters {
+namespace impl {
+
+
+auto bitwisenot TORCHTRT_UNUSED =
+    RegisterNodeConversionPatterns()
+        .pattern({"aten::bitwise_not(Tensor self) -> Tensor",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto in = args[0].ITensorOrFreeze(ctx);
+                    nvinfer1::ILayer* out;
+
+                    if(in->getType() == nvinfer1::DataType::kINT32) {
+                      // Integer case
+                      auto one = torch::tensor({1}, util::TRTDataTypeToScalarType(in->getType()));
+                      auto one_const = tensor_to_const(ctx, one);
+                      auto neg = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kNEG);
+                      TORCHTRT_CHECK(neg, "Unable to create neg unary layer from node: " << *n);
+                      out = add_elementwise(
+                          ctx, nvinfer1::ElementWiseOperation::kSUB, neg->getOutput(0),
+                          one_const, util::node_info(n));
+                      TORCHTRT_CHECK(out, "Unable to create sub layer from node: " << *n);
+                    } else if(in->getType() == nvinfer1::DataType::kBOOL) {
+                      // Boolean case
+                      out = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kNOT);
+                      TORCHTRT_CHECK(out, "Unable to create logical not layer from node: " << *n);
+                    } else {
+                      LOG_ERROR("Input tensor must be 32 bit integer or boolean");
+                      return false;
+                    }
+
+                    out->setName(util::node_info(n).c_str());
+                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0],
+                        out->getOutput(0));
+                    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
+                    return true;
+                  }});
+
+
+} // namespace impl
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace torch_tensorrt
diff --git a/tests/core/conversion/converters/BUILD b/tests/core/conversion/converters/BUILD
index 3dc7865b9e..5843acae75 100644
--- a/tests/core/conversion/converters/BUILD
+++ b/tests/core/conversion/converters/BUILD
@@ -15,6 +15,10 @@ converter_test(
     name = "test_batch_norm",
 )
 
+converter_test(
+    name = "test_bitwise",
+)
+
 converter_test(
     name = "test_instance_norm",
 )
@@ -136,6 +140,7 @@ test_suite(
     tests = [
         ":test_activation",
         ":test_batch_norm",
+        ":test_bitwise",
         ":test_instance_norm",
         ":test_cast",
         ":test_clone",
diff --git a/tests/core/conversion/converters/test_bitwise.cpp b/tests/core/conversion/converters/test_bitwise.cpp
index b4ac0dc280..e0fa940035 100644
--- a/tests/core/conversion/converters/test_bitwise.cpp
+++ b/tests/core/conversion/converters/test_bitwise.cpp
@@ -1,24 +1,39 @@
+#include <string>
 #include "core/compiler.h"
 #include "gtest/gtest.h"
 #include "tests/util/util.h"
 #include "torch/csrc/jit/ir/irparser.h"
 
-TEST(Converters, ATenBitwiseNotConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%0 : Tensor):
-        %3 : Tensor = aten::bitwise_not(%0)
-        return (%3))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::randint(-128, 128, {10}, {at::kCUDA});
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  in = at::clone(in);
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0]));
-}
+std::string gen_test_graph() {
+  return R"IR(
+    graph(%0: Tensor):
+      %3 : Tensor = aten::bitwise_not(%0)
+      return (%3))IR";
+}
+
+#define test_bitwise_not(dtype) \
+  TEST(Converters, ATenBitwiseNot##dtype##ConvertsCorrectly) { \
+    const auto graph = gen_test_graph(); \
+    \
+    auto g = std::make_shared<torch::jit::Graph>(); \
+    torch::jit::parseIR(graph, g.get()); \
+    \
+    at::Tensor in; \
+    if (strcmp(#dtype, "Integer") == 0) \
+      in = at::randint(-128, 128, {10}, {at::kCUDA}).toType(at::kInt); \
+    if (strcmp(#dtype, "Boolean") == 0) \
+      in = at::randint(0, 2, {10}, {at::kCUDA}).toType(at::kBool); \
+    auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); \
+    auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); \
+    \
+    in = at::clone(in); \
+    params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); \
+    auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); \
+    \
+    ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0]));\
+  }
+
+test_bitwise_not(Integer);
+test_bitwise_not(Boolean);
+
+#undef test_bitwise_not
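The converter introduced above lowers `aten::bitwise_not` without a native TensorRT bitwise-NOT layer: for `kINT32` it rewrites `~x` as `-x - 1` (valid for two's-complement integers), and for `kBOOL` it falls back to the logical-NOT unary. A quick LibTorch check (illustrative only, not part of the patch) that both rewrites agree with ATen:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Integer path: ~x == -x - 1 under two's complement.
  auto x = torch::randint(-128, 128, {10}, torch::kInt);
  std::cout << torch::equal(torch::bitwise_not(x), -x - 1) << "\n"; // 1

  // Boolean path: bitwise NOT degenerates to logical NOT.
  auto b = torch::randint(0, 2, {10}).to(torch::kBool);
  std::cout << torch::equal(torch::bitwise_not(b), torch::logical_not(b)) << "\n"; // 1
}
```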
From 3e27790a6a830b41279be6536041855e2b78a34e Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Thu, 26 May 2022 11:46:39 -0700
Subject: [PATCH 3/8] updated test to add dtype data to converter input

---
 tests/util/run_graph_engine.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/util/run_graph_engine.cpp b/tests/util/run_graph_engine.cpp
index df52b54b26..86d999ab62 100644
--- a/tests/util/run_graph_engine.cpp
+++ b/tests/util/run_graph_engine.cpp
@@ -4,6 +4,7 @@
 #include "core/ir/ir.h"
 #include "core/runtime/runtime.h"
 #include "core/util/prelude.h"
+#include "core/util/trt_util.h"
 #include "cuda_runtime_api.h"
 #include "torch/csrc/jit/ir/ir.h"
 #include "torch/csrc/jit/ir/irparser.h"
@@ -19,7 +20,7 @@ namespace util {
 std::vector<core::ir::Input> toInputs(std::vector<at::Tensor> ten) {
   std::vector<core::ir::Input> a;
   for (auto i : ten) {
-    a.push_back(core::ir::Input(core::util::toVec(i.sizes())));
+    a.push_back(core::ir::Input(core::util::toVec(i.sizes()), core::util::ScalarTypeToTRTDataType(i.scalar_type())));
   }
   return std::move(a);
 }
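This change makes the test harness propagate each input tensor's scalar type into the `core::ir::Input` handed to the engine builder; before it, every input defaulted to float, so the converter's `kINT32`/`kBOOL` branches were never reached. An illustrative sketch of the kind of mapping `ScalarTypeToTRTDataType` performs (the real table lives in `core/util/trt_util.cpp`; the entries here are an assumption, not a copy):

```cpp
#include <ATen/ATen.h>
#include <NvInfer.h>
#include <stdexcept>

// Hypothetical reduced mapping, for illustration only.
nvinfer1::DataType to_trt_dtype(at::ScalarType t) {
  switch (t) {
    case at::kFloat: return nvinfer1::DataType::kFLOAT;
    case at::kHalf:  return nvinfer1::DataType::kHALF;
    case at::kChar:  return nvinfer1::DataType::kINT8;
    case at::kInt:   return nvinfer1::DataType::kINT32;
    case at::kBool:  return nvinfer1::DataType::kBOOL;
    default:         throw std::runtime_error("unsupported dtype");
  }
}
```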
From c0bcffb2a2f9a80ed4f4ccaab98243bdaab4b044 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Tue, 31 May 2022 17:32:13 -0700
Subject: [PATCH 4/8] type conversion to int for equality check in bitwise test

---
 .../conversion/converters/test_bitwise.cpp    | 43 ++++++++++---------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/tests/core/conversion/converters/test_bitwise.cpp b/tests/core/conversion/converters/test_bitwise.cpp
index e0fa940035..dd1d0a2f21 100644
--- a/tests/core/conversion/converters/test_bitwise.cpp
+++ b/tests/core/conversion/converters/test_bitwise.cpp
@@ -11,26 +11,29 @@ std::string gen_test_graph() {
     return (%3))IR";
 }
 
-#define test_bitwise_not(dtype) \
-  TEST(Converters, ATenBitwiseNot##dtype##ConvertsCorrectly) { \
-    const auto graph = gen_test_graph(); \
-    \
-    auto g = std::make_shared<torch::jit::Graph>(); \
-    torch::jit::parseIR(graph, g.get()); \
-    \
-    at::Tensor in; \
-    if (strcmp(#dtype, "Integer") == 0) \
-      in = at::randint(-128, 128, {10}, {at::kCUDA}).toType(at::kInt); \
-    if (strcmp(#dtype, "Boolean") == 0) \
-      in = at::randint(0, 2, {10}, {at::kCUDA}).toType(at::kBool); \
-    auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); \
-    auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); \
-    \
-    in = at::clone(in); \
-    params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); \
-    auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); \
-    \
-    ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0]));\
+#define test_bitwise_not(dtype)                                                          \
+  TEST(Converters, ATenBitwiseNot##dtype##ConvertsCorrectly) {                           \
+    const auto graph = gen_test_graph();                                                 \
+                                                                                         \
+    auto g = std::make_shared<torch::jit::Graph>();                                      \
+    torch::jit::parseIR(graph, g.get());                                                 \
+                                                                                         \
+    at::Tensor in;                                                                       \
+    if (strcmp(#dtype, "Integer") == 0)                                                  \
+      in = at::randint(-128, 128, {10}, {at::kCUDA}).toType(at::kInt);                   \
+    if (strcmp(#dtype, "Boolean") == 0)                                                  \
+      in = at::randint(0, 2, {10}, {at::kCUDA}).toType(at::kBool);                       \
+    auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});          \
+    auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});           \
+                                                                                         \
+    in = at::clone(in);                                                                  \
+    params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});               \
+    auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});     \
+                                                                                         \
+    auto jit_int = jit_results[0].toType(at::kInt);                                      \
+    auto trt_int = trt_results[0].toType(at::kInt);                                      \
+                                                                                         \
+    ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_int, trt_int));            \
   }
 
 test_bitwise_not(Integer);
From 3a77121c319ff777e9ba4f37915854f0301e3b95 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Wed, 1 Jun 2022 17:35:13 -0700
Subject: [PATCH 5/8] use kprod with -1 instead of kneg for negation

---
 core/conversion/converters/impl/bitwise.cpp | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/core/conversion/converters/impl/bitwise.cpp b/core/conversion/converters/impl/bitwise.cpp
index 0d12f71575..cc52a5e287 100644
--- a/core/conversion/converters/impl/bitwise.cpp
+++ b/core/conversion/converters/impl/bitwise.cpp
@@ -19,14 +19,16 @@ auto bitwisenot TORCHTRT_UNUSED =
 
                     if(in->getType() == nvinfer1::DataType::kINT32) {
                       // Integer case
-                      auto one = torch::tensor({1}, util::TRTDataTypeToScalarType(in->getType()));
-                      auto one_const = tensor_to_const(ctx, one);
-                      auto neg = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kNEG);
-                      TORCHTRT_CHECK(neg, "Unable to create neg unary layer from node: " << *n);
+                      auto neg_one = torch::tensor({-1}, util::TRTDataTypeToScalarType(in->getType()));
+                      auto neg_one_const = tensor_to_const(ctx, neg_one);
+                      auto neg = add_elementwise(
+                          ctx, nvinfer1::ElementWiseOperation::kPROD, in,
+                          neg_one_const, util::node_info(n) + std::string("_Negation"));
+                      TORCHTRT_CHECK(neg, "Unable to create prod layer from node: " << *n);
                       out = add_elementwise(
-                          ctx, nvinfer1::ElementWiseOperation::kSUB, neg->getOutput(0),
-                          one_const, util::node_info(n));
-                      TORCHTRT_CHECK(out, "Unable to create sub layer from node: " << *n);
+                          ctx, nvinfer1::ElementWiseOperation::kSUM, neg->getOutput(0),
+                          neg_one_const, util::node_info(n) + std::string("_SubOne"));
+                      TORCHTRT_CHECK(out, "Unable to create sum layer from node: " << *n);
                     } else if(in->getType() == nvinfer1::DataType::kBOOL) {
                       // Boolean case
                       out = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kNOT);
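Patch 5 drops the `IUnaryLayer` `kNEG` and composes the negation out of elementwise layers instead, presumably because `kNEG` lacked INT32 support in the targeted TensorRT version; the single `-1` constant then serves both the multiply and the add. In raw TensorRT terms the integer path now builds `(x * -1) + (-1)`, i.e. `~x = -x - 1`. A sketch assuming an existing `INetworkDefinition* net` and INT32 tensors `x` and `neg_one` (error checks elided; `bitwise_not_int32` is a hypothetical name for illustration):

```cpp
#include <NvInfer.h>

// ~x for INT32 built from two elementwise layers, no unary kNEG.
nvinfer1::ITensor* bitwise_not_int32(
    nvinfer1::INetworkDefinition* net, nvinfer1::ITensor* x, nvinfer1::ITensor* neg_one) {
  auto* prod = net->addElementWise(*x, *neg_one, nvinfer1::ElementWiseOperation::kPROD);
  auto* sum = net->addElementWise(*prod->getOutput(0), *neg_one, nvinfer1::ElementWiseOperation::kSUM);
  return sum->getOutput(0);
}
```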
From 166b09d1adc4b5d18b5fc992c81d0170810be7e9 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Fri, 3 Jun 2022 11:02:41 -0700
Subject: [PATCH 6/8] fix lint

---
 core/conversion/converters/impl/bitwise.cpp   | 19 ++++++++++++-------
 .../conversion/converters/test_bitwise.cpp    |  8 ++++----
 2 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/core/conversion/converters/impl/bitwise.cpp b/core/conversion/converters/impl/bitwise.cpp
index cc52a5e287..76a78bf146 100644
--- a/core/conversion/converters/impl/bitwise.cpp
+++ b/core/conversion/converters/impl/bitwise.cpp
@@ -18,16 +18,22 @@ auto bitwisenot TORCHTRT_UNUSED =
                     nvinfer1::ILayer* out;
 
                     if(in->getType() == nvinfer1::DataType::kINT32) {
-                      // Integer case
+                      // Integer case, using ~x = -x - 1
                       auto neg_one = torch::tensor({-1}, util::TRTDataTypeToScalarType(in->getType()));
                       auto neg_one_const = tensor_to_const(ctx, neg_one);
                       auto neg = add_elementwise(
-                          ctx, nvinfer1::ElementWiseOperation::kPROD, in,
-                          neg_one_const, util::node_info(n) + std::string("_Negation"));
+                          ctx,
+                          nvinfer1::ElementWiseOperation::kPROD,
+                          in,
+                          neg_one_const,
+                          util::node_info(n) + std::string("_Negation"));
                       TORCHTRT_CHECK(neg, "Unable to create prod layer from node: " << *n);
                       out = add_elementwise(
-                          ctx, nvinfer1::ElementWiseOperation::kSUM, neg->getOutput(0),
-                          neg_one_const, util::node_info(n) + std::string("_SubOne"));
+                          ctx,
+                          nvinfer1::ElementWiseOperation::kSUM,
+                          neg->getOutput(0),
+                          neg_one_const,
+                          util::node_info(n) + std::string("_SubOne"));
                       TORCHTRT_CHECK(out, "Unable to create sum layer from node: " << *n);
                     } else if(in->getType() == nvinfer1::DataType::kBOOL) {
                       // Boolean case
@@ -39,8 +45,7 @@ auto bitwisenot TORCHTRT_UNUSED =
                     }
 
                     out->setName(util::node_info(n).c_str());
-                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0],
-                        out->getOutput(0));
+                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out->getOutput(0));
                     LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
 
                     return true;
diff --git a/tests/core/conversion/converters/test_bitwise.cpp b/tests/core/conversion/converters/test_bitwise.cpp
index dd1d0a2f21..7826b51c44 100644
--- a/tests/core/conversion/converters/test_bitwise.cpp
+++ b/tests/core/conversion/converters/test_bitwise.cpp
@@ -5,10 +5,10 @@
 #include "torch/csrc/jit/ir/irparser.h"
 
 std::string gen_test_graph() {
-  return R"IR(
-    graph(%0: Tensor):
-      %3 : Tensor = aten::bitwise_not(%0)
-      return (%3))IR";
+  return R"IR(
+      graph(%0: Tensor):
+        %3 : Tensor = aten::bitwise_not(%0)
+        return (%3))IR";
 }
From c0eabf5d1f0190b918db7b79bed78151ca17d1c0 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Wed, 15 Jun 2022 14:40:57 -0700
Subject: [PATCH 7/8] more lint fixes

---
 core/conversion/converters/impl/bitwise.cpp | 80 ++++++++++-----------
 1 file changed, 38 insertions(+), 42 deletions(-)

diff --git a/core/conversion/converters/impl/bitwise.cpp b/core/conversion/converters/impl/bitwise.cpp
index 76a78bf146..992c11bdf7 100644
--- a/core/conversion/converters/impl/bitwise.cpp
+++ b/core/conversion/converters/impl/bitwise.cpp
@@ -9,48 +9,44 @@ namespace conversion {
 namespace converters {
 namespace impl {
 
-
-auto bitwisenot TORCHTRT_UNUSED =
-    RegisterNodeConversionPatterns()
-        .pattern({"aten::bitwise_not(Tensor self) -> Tensor",
-                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    auto in = args[0].ITensorOrFreeze(ctx);
-                    nvinfer1::ILayer* out;
-
-                    if(in->getType() == nvinfer1::DataType::kINT32) {
-                      // Integer case, using ~x = -x - 1
-                      auto neg_one = torch::tensor({-1}, util::TRTDataTypeToScalarType(in->getType()));
-                      auto neg_one_const = tensor_to_const(ctx, neg_one);
-                      auto neg = add_elementwise(
-                          ctx,
-                          nvinfer1::ElementWiseOperation::kPROD,
-                          in,
-                          neg_one_const,
-                          util::node_info(n) + std::string("_Negation"));
-                      TORCHTRT_CHECK(neg, "Unable to create prod layer from node: " << *n);
-                      out = add_elementwise(
-                          ctx,
-                          nvinfer1::ElementWiseOperation::kSUM,
-                          neg->getOutput(0),
-                          neg_one_const,
-                          util::node_info(n) + std::string("_SubOne"));
-                      TORCHTRT_CHECK(out, "Unable to create sum layer from node: " << *n);
-                    } else if(in->getType() == nvinfer1::DataType::kBOOL) {
-                      // Boolean case
-                      out = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kNOT);
-                      TORCHTRT_CHECK(out, "Unable to create logical not layer from node: " << *n);
-                    } else {
-                      LOG_ERROR("Input tensor must be 32 bit integer or boolean");
-                      return false;
-                    }
-
-                    out->setName(util::node_info(n).c_str());
-                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out->getOutput(0));
-                    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
-                    return true;
-                  }});
-
+auto bitwise_not_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(
+    {"aten::bitwise_not(Tensor self) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+       auto in = args[0].ITensorOrFreeze(ctx);
+       nvinfer1::ILayer* out;
+
+       if (in->getType() == nvinfer1::DataType::kINT32) {
+         // Integer case, using ~x = -x - 1
+         auto neg_one = torch::tensor({-1}, util::TRTDataTypeToScalarType(in->getType()));
+         auto neg_one_const = tensor_to_const(ctx, neg_one);
+         auto neg = add_elementwise(
+             ctx,
+             nvinfer1::ElementWiseOperation::kPROD,
+             in,
+             neg_one_const,
+             util::node_info(n) + std::string("_Negation"));
+         TORCHTRT_CHECK(neg, "Unable to create prod layer from node: " << *n);
+         out = add_elementwise(
+             ctx,
+             nvinfer1::ElementWiseOperation::kSUM,
+             neg->getOutput(0),
+             neg_one_const,
+             util::node_info(n) + std::string("_SubOne"));
+         TORCHTRT_CHECK(out, "Unable to create sum layer from node: " << *n);
+       } else if (in->getType() == nvinfer1::DataType::kBOOL) {
+         // Boolean case
+         out = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kNOT);
+         TORCHTRT_CHECK(out, "Unable to create logical not layer from node: " << *n);
+       } else {
+         LOG_ERROR("Input tensor must be 32 bit integer or boolean");
+         return false;
+       }
+
+       out->setName(util::node_info(n).c_str());
+       auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out->getOutput(0));
+       LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
+       return true;
+     }});
 
 } // namespace impl
 } // namespace converters
From e6998008e6409e1eb9f8d6bf616b18919fb31811 Mon Sep 17 00:00:00 2001
From: Brenton Chu
Date: Thu, 23 Jun 2022 10:22:18 -0700
Subject: [PATCH 8/8] lint fix unmodified files

---
 core/lowering/register_trt_placeholder_ops.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/core/lowering/register_trt_placeholder_ops.cpp b/core/lowering/register_trt_placeholder_ops.cpp
index 5ba8171208..17d7d3f47a 100644
--- a/core/lowering/register_trt_placeholder_ops.cpp
+++ b/core/lowering/register_trt_placeholder_ops.cpp
@@ -10,7 +10,10 @@ c10::AliasAnalysisKind aliasAnalysisFromSchema() {
 RegisterOperators trt_placeholder_ops_reg({
     /// Op marks a Tensor to be converted from a Torch Tensor
     /// to a TRT constant Tensor
-    Operator("trt::const(Tensor val) -> Tensor", [](Stack& stack) { /*noop*/ }, aliasAnalysisFromSchema()),
+    Operator(
+        "trt::const(Tensor val) -> Tensor",
+        [](Stack& stack) { /*noop*/ },
+        aliasAnalysisFromSchema()),
 });
 
 } // namespace jit