
Commit 77bf9da

refactor: Update for new api
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>
1 parent: 3ff272f

2 files changed (+6 −6 lines)


core/conversion/conversion.cpp

Lines changed: 4 additions & 4 deletions
@@ -215,15 +215,15 @@ void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outp
           ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
       ctx->num_outputs += 1;
     } else if (out_ivalue.isTuple()) {
-      TRTORCH_THROW_ERROR("Tuple type. Only a single tensor or a TensorList type is supported.");
+      TORCHTRT_THROW_ERROR("Tuple type. Only a single tensor or a TensorList type is supported.");
     } else if (out_ivalue.isList()) {
-      TRTORCH_THROW_ERROR("List type. Only a single tensor or a TensorList type is supported.");
+      TORCHTRT_THROW_ERROR("List type. Only a single tensor or a TensorList type is supported.");
     } else if (out_ivalue.isScalar()) {
-      TRTORCH_THROW_ERROR("Scalar type. Only a single tensor or a TensorList type is supported.");
+      TORCHTRT_THROW_ERROR("Scalar type. Only a single tensor or a TensorList type is supported.");
     } else if (out_ivalue.isTensor()) {
       // prim::NumToTensor will go to here
       std::string name = std::string("output_") + std::to_string(ctx->num_outputs);
-      auto out_tensor = trtorch::core::conversion::converters::tensor_to_const(ctx, out_ivalue.toTensor(), "");
+      auto out_tensor = converters::tensor_to_const(ctx, out_ivalue.toTensor(), "");
       out_tensor->setName(name.c_str());
       ctx->net->markOutput(*out_tensor);
       LOG_INFO(
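
This hunk is a pure rename: the TRTORCH_THROW_ERROR macro becomes TORCHTRT_THROW_ERROR and the fully qualified trtorch::core::conversion::converters::tensor_to_const call is shortened to converters::tensor_to_const, in line with the project's rename to Torch-TensorRT. For reference, below is a minimal standalone sketch of the output-type dispatch that MarkOutputs performs; it assumes libtorch headers are available, and the function name check_output_ivalue and the use of std::runtime_error in place of the project's error macro are illustrative stand-ins, not part of this commit.

// Minimal sketch (assumes libtorch): only a single tensor or a list of tensors
// is accepted as a graph output; tuples, generic lists and scalars are rejected.
// check_output_ivalue and std::runtime_error are illustrative, not the Torch-TensorRT API.
#include <stdexcept>
#include <ATen/core/ivalue.h>

void check_output_ivalue(const c10::IValue& out_ivalue) {
  if (out_ivalue.isTensor() || out_ivalue.isTensorList()) {
    return;  // supported: single tensor or TensorList
  } else if (out_ivalue.isTuple()) {
    throw std::runtime_error("Tuple type. Only a single tensor or a TensorList type is supported.");
  } else if (out_ivalue.isList()) {
    throw std::runtime_error("List type. Only a single tensor or a TensorList type is supported.");
  } else if (out_ivalue.isScalar()) {
    throw std::runtime_error("Scalar type. Only a single tensor or a TensorList type is supported.");
  }
}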

core/partitioning/shape_analysis.cpp

Lines changed: 2 additions & 2 deletions
@@ -112,7 +112,7 @@ void getSegmentsOutputByRunning(
       // set the input_shape and data_type
       at::ScalarType t = ivalues_maps[i].toTensor().scalar_type();
       if (!partition_info.truncate_long_and_double && (t == at::kLong || t == at::kDouble)) {
-        TRTORCH_THROW_ERROR(
+        TORCHTRT_THROW_ERROR(
             "Unable to process subgraph input type of at::kLong/at::kDouble, try to compile model with truncate_long_and_double enabled");
       } else if (partition_info.truncate_long_and_double && t == at::kLong) {
         ivalues_maps[i] = ivalues_maps[i].toTensor().to(at::kInt);
@@ -123,7 +123,7 @@ void getSegmentsOutputByRunning(
       }
       c10::optional<nvinfer1::DataType> dtype = util::optTypeMetaToTRTDataType(ivalues_maps[i].toTensor().dtype());
       if (dtype == c10::nullopt) {
-        TRTORCH_THROW_ERROR("Unsupported input data type " << ivalues_maps[i].toTensor().dtype());
+        TORCHTRT_THROW_ERROR("Unsupported input data type " << ivalues_maps[i].toTensor().dtype());
       }
       if (ivalues_maps[i].toTensor().sizes().size() == 0) {
         // handle Scalar types, which has sizes of []
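
These two hunks are again only the macro rename; the surrounding logic is unchanged: with truncate_long_and_double disabled, at::kLong and at::kDouble segment inputs are rejected, and with it enabled, at::kLong tensors are narrowed to at::kInt before shape analysis runs. Below is a minimal sketch of that behaviour, assuming libtorch; prepare_segment_input and the plain std::runtime_error are illustrative stand-ins for the Torch-TensorRT internals, and only the kLong path visible in the diff context is reproduced.

// Minimal sketch (assumes libtorch) of the truncation behaviour around this hunk.
// prepare_segment_input and std::runtime_error are illustrative, not the project API.
#include <stdexcept>
#include <ATen/ATen.h>

at::Tensor prepare_segment_input(const at::Tensor& in, bool truncate_long_and_double) {
  at::ScalarType t = in.scalar_type();
  if (!truncate_long_and_double && (t == at::kLong || t == at::kDouble)) {
    throw std::runtime_error(
        "Unable to process subgraph input type of at::kLong/at::kDouble, "
        "try to compile model with truncate_long_and_double enabled");
  } else if (truncate_long_and_double && t == at::kLong) {
    return in.to(at::kInt);  // narrow 64-bit integers to 32-bit, mirroring the context line above
  }
  return in;
}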
