@@ -58,7 +58,7 @@ auto element_wise_registrations TORCHTRT_UNUSED =
           auto scalar = args[2].unwrapToScalar();
 
           if (1 != scalar.to<float>()) {
-            auto alphaTensor = impl::scalar_to_tensor(ctx, scalar);
+            auto alphaTensor = scalar_to_tensor(ctx, scalar);
             auto scaleLayer = add_elementwise(
                 ctx,
                 nvinfer1::ElementWiseOperation::kPROD,
@@ -87,7 +87,7 @@ auto element_wise_registrations TORCHTRT_UNUSED =
           auto scalar = args[2].unwrapToScalar();
 
           if (1 != scalar.to<float>()) {
-            auto alphaTensor = impl::scalar_to_tensor(ctx, scalar);
+            auto alphaTensor = scalar_to_tensor(ctx, scalar);
             auto scaleLayer = add_elementwise(
                 ctx,
                 nvinfer1::ElementWiseOperation::kPROD,
@@ -262,11 +262,11 @@ auto element_wise_registrations TORCHTRT_UNUSED =
         [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
           // Should implement other - alpha * self
           auto self = args[0].ITensorOrFreeze(ctx);
-          auto other = impl::scalar_to_tensor(ctx, args[1].unwrapToScalar());
+          auto other = scalar_to_tensor(ctx, args[1].unwrapToScalar());
           auto scalar = args[2].unwrapToScalar();
 
           if (1 != scalar.to<float>()) {
-            auto alphaTensor = impl::scalar_to_tensor(ctx, scalar);
+            auto alphaTensor = scalar_to_tensor(ctx, scalar);
             auto scaleLayer = add_elementwise(
                 ctx,
                 nvinfer1::ElementWiseOperation::kPROD,
@@ -294,7 +294,7 @@ auto element_wise_registrations TORCHTRT_UNUSED =
           auto scalar = args[2].unwrapToScalar();
 
           if (1 != scalar.to<float>()) {
-            auto alphaTensor = impl::scalar_to_tensor(ctx, scalar);
+            auto alphaTensor = scalar_to_tensor(ctx, scalar);
             auto scaleLayer = add_elementwise(
                 ctx,
                 nvinfer1::ElementWiseOperation::kPROD,
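Every hunk above touches the same alpha-scaling pattern: when the converter's alpha scalar is not 1, a constant tensor holding alpha is built (via scalar_to_tensor) and multiplied into the operand with an ElementWise kPROD layer before the add/sub itself, implementing e.g. other - alpha * self. Below is a minimal standalone sketch of that pattern against the raw TensorRT C++ API, under stated assumptions: scale_by_alpha is a hypothetical helper name (the real converters go through Torch-TensorRT's internal scalar_to_tensor and add_elementwise utilities), and FP32-only handling is assumed.

// Hypothetical sketch: scale a tensor by alpha with an ElementWise kPROD
// layer, mirroring the pattern in the hunks above. Assumes FP32.
#include <NvInfer.h>

nvinfer1::ITensor* scale_by_alpha(
    nvinfer1::INetworkDefinition& net,
    nvinfer1::ITensor& self,
    const float* alpha /* storage must outlive engine building */) {
  if (*alpha == 1.f) {
    return &self; // alpha == 1: multiplying is a no-op, skip the layer
  }
  // Build a constant with a singleton in every dimension of `self` so the
  // multiply broadcasts across the whole tensor.
  nvinfer1::Dims dims;
  dims.nbDims = self.getDimensions().nbDims;
  for (int32_t i = 0; i < dims.nbDims; ++i) {
    dims.d[i] = 1;
  }
  nvinfer1::Weights w{nvinfer1::DataType::kFLOAT, alpha, 1};
  auto* alphaTensor = net.addConstant(dims, w)->getOutput(0);
  // self * alpha
  auto* scaleLayer = net.addElementWise(
      self, *alphaTensor, nvinfer1::ElementWiseOperation::kPROD);
  return scaleLayer->getOutput(0);
}

The rank-matched all-ones Dims is deliberate: TensorRT's elementwise layers require both inputs to have the same rank, and dimensions of size 1 are broadcast, so a single-element constant of matching rank scales the entire tensor.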