
Commit 832b1c7

refactor: Apply linting
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>
1 parent 52e2f05 commit 832b1c7

File tree

10 files changed (+125, -104 lines)


core/compiler.cpp

Lines changed: 26 additions & 16 deletions

@@ -288,18 +288,24 @@ GraphAndMapping ConstructFallbackGraph(
   return {new_g, old_to_new_g};
 }

-
-void MapInputsAndDetermineDTypes(CompileSpec& cfg, std::shared_ptr<torch::jit::Graph>& g, ir::StaticParams& static_params, ir::TypeMap& first_use_type_map) {
+void MapInputsAndDetermineDTypes(
+    CompileSpec& cfg,
+    std::shared_ptr<torch::jit::Graph>& g,
+    ir::StaticParams& static_params,
+    ir::TypeMap& first_use_type_map) {
   // Associate input specs with inputs
   cfg.convert_info.inputs = std::move(ir::associate_specs_with_inputs(g, cfg.inputs, static_params));

   for (auto& in : g->inputs()) {
     auto est_type_opt = first_use_type_map.find(in)->second;
     ir::Input& spec = cfg.convert_info.inputs.find(in)->second;
     if (est_type_opt && !spec.dtype_is_user_defined) {
-      // If we can calculate the type from the graph and the type was not defined by the user then use the calculated type
-      LOG_INFO("Since input type is not explicitly defined, infering using first tensor calculation\n Found input "
-                << in->debugName() << " has type " << est_type_opt.value() << ". If this is incorrect explicitly set dtype for input and file a bug");
+      // If we can calculate the type from the graph and the type was not defined by the user then use the calculated
+      // type
+      LOG_INFO(
+          "Since input type is not explicitly defined, infering using first tensor calculation\n Found input "
+          << in->debugName() << " has type " << est_type_opt.value()
+          << ". If this is incorrect explicitly set dtype for input and file a bug");
       spec.dtype = util::ScalarTypeToTRTDataType(est_type_opt.value());
     } else if (!est_type_opt && !spec.dtype_is_user_defined) {
       // If we cannot calculate the type and the user did not define the type, then default to FP32
@@ -313,7 +319,7 @@ void MapInputsAndDetermineDTypes(CompileSpec& cfg, std::shared_ptr<torch::jit::G
     } else {
       if (util::TRTDataTypeToScalarType(cfg.convert_info.inputs.find(in)->second.dtype) != est_type_opt.value()) {
         std::stringstream ss;
-        ss <<"For input " << in->debugName() << ", found user specified input dtype as ";
+        ss << "For input " << in->debugName() << ", found user specified input dtype as ";
         ss << cfg.convert_info.inputs.find(in)->second.dtype;
         ss << ", however when inspecting the graph, the input type expected was inferred to be ";
         ss << est_type_opt.value() << std::endl;
@@ -340,7 +346,9 @@ std::string ConvertGraphToTRTEngine(const torch::jit::script::Module& mod, std::
   auto graph_and_parameters = lowering::Lower(mod, method_name, cfg.lower_info);

   auto g = graph_and_parameters.first;
-  TRTORCH_CHECK(conversion::VerifyConverterSupportForBlock(g->block()), "Not all operations in graph are supported by the compiler");
+  TRTORCH_CHECK(
+      conversion::VerifyConverterSupportForBlock(g->block()),
+      "Not all operations in graph are supported by the compiler");
   auto params = graph_and_parameters.second;
   auto static_params = ir::get_static_params(g->inputs(), params);
   // Infer the type of an input from the weights of the calculation
@@ -385,17 +393,17 @@ torch::jit::Module CompileGraph(const torch::jit::Module& mod, CompileSpec cfg)

   MapInputsAndDetermineDTypes(cfg, g, static_params, first_use_types);

-  if (cfg.partition_info.enabled
-      && (cfg.lower_info.forced_fallback_modules.size() == 0
-      && cfg.partition_info.forced_fallback_operators.size() == 0
-      && conversion::VerifyConverterSupportForBlock(g->block(), true))) {
+  if (cfg.partition_info.enabled &&
+      (cfg.lower_info.forced_fallback_modules.size() == 0 &&
+       cfg.partition_info.forced_fallback_operators.size() == 0 &&
+       conversion::VerifyConverterSupportForBlock(g->block(), true))) {
     LOG_INFO("Skipping partitioning since model is fully supported");
   }

-  if (cfg.partition_info.enabled
-      && !(cfg.lower_info.forced_fallback_modules.size() == 0
-      && cfg.partition_info.forced_fallback_operators.size() == 0
-      && conversion::VerifyConverterSupportForBlock(g->block(), false))) {
+  if (cfg.partition_info.enabled &&
+      !(cfg.lower_info.forced_fallback_modules.size() == 0 &&
+        cfg.partition_info.forced_fallback_operators.size() == 0 &&
+        conversion::VerifyConverterSupportForBlock(g->block(), false))) {
     auto input_ivalues_map = partitioning::generateRandomInputs(cfg.convert_info.inputs, first_use_types);
     auto graph_and_mapping = ConstructFallbackGraph(new_mod, g->block(), input_ivalues_map, cfg, static_params);
     new_g = graph_and_mapping.first;
@@ -408,7 +416,9 @@ torch::jit::Module CompileGraph(const torch::jit::Module& mod, CompileSpec cfg)
       return mod;
     }
   } else {
-    TRTORCH_CHECK(conversion::VerifyConverterSupportForBlock(g->block()), "Not all operations in graph are supported by the compiler");
+    TRTORCH_CHECK(
+        conversion::VerifyConverterSupportForBlock(g->block()),
+        "Not all operations in graph are supported by the compiler");
     auto engine = conversion::ConvertBlockToEngine(g->block(), cfg.convert_info, static_params);
     auto device_spec = cfg.convert_info.engine_settings.device;
     auto cuda_device = runtime::CudaDevice(device_spec.gpu_id, device_spec.device_type);
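
The two reformatted conditionals in CompileGraph encode one decision: partitioning runs only when it is enabled and the graph is not fully convertible (no forced fallback modules, no forced fallback operators, and converter support for every node in the block). A minimal standalone sketch of that predicate, with hypothetical names standing in for the cfg fields and the converter query:

// Sketch only: hypothetical names, not the library's code.
#include <cstdio>

bool fully_convertible(size_t forced_fallback_modules, size_t forced_fallback_ops, bool converters_cover_block) {
  return forced_fallback_modules == 0 && forced_fallback_ops == 0 && converters_cover_block;
}

int main() {
  const bool partitioning_enabled = true;
  // First branch above: fully supported graphs skip partitioning entirely.
  std::printf("skip partitioning: %d\n", partitioning_enabled && fully_convertible(0, 0, true));
  // Second branch above: any forced fallback (or unsupported op) routes the
  // graph through ConstructFallbackGraph instead.
  std::printf("build fallback graph: %d\n", partitioning_enabled && !fully_convertible(0, 1, true));
  return 0;
}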

core/conversion/conversion.cpp

Lines changed: 1 addition & 1 deletion

@@ -518,7 +518,7 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
       if (suppress_errors) {
         LOG_ERROR(
             "Unsupported operator: " << *schema << std::endl
-                << trtorch::core::util::GetPyTorchSourceCode(n) << std::endl);
+                                     << trtorch::core::util::GetPyTorchSourceCode(n) << std::endl);
       }
     }
   }

core/conversion/conversion.h

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ std::string ConvertBlockToEngine(

 bool OpSupported(const torch::jit::Node* n);

-bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_errors=false);
+bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_errors = false);

 c10::optional<torch::jit::IValue> EvaluateNode(
     ConversionCtx* ctx,
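
Only whitespace changes here: the linter puts spaces around '=' in defaulted parameters. As context for the call sites touched in core/compiler.cpp, a self-contained sketch (illustrative names, not the library's declaration) of how the defaulted flag keeps older one-argument calls source-compatible:

// Sketch only: a defaulted flag preserves existing call sites.
#include <iostream>
#include <string>

bool verify_support(const std::string& block, bool suppress_errors = false) {
  std::cout << "checking " << block << " (suppress_errors=" << std::boolalpha << suppress_errors << ")\n";
  return true;  // Placeholder result; the real check walks the block's nodes.
}

int main() {
  verify_support("forward");        // One-argument call: default of false applies.
  verify_support("forward", true);  // Explicit opt-in, as in the partitioning checks.
  return 0;
}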

core/util/jit_util.h

Lines changed: 0 additions & 1 deletion

@@ -9,7 +9,6 @@ namespace trtorch {
 namespace core {
 namespace util {

-
 inline std::string node_info(const torch::jit::Node* n) {
   std::stringstream ss;
   ss << *n;

cpp/include/trtorch/trtorch.h

Lines changed: 14 additions & 8 deletions

@@ -387,7 +387,8 @@ struct TRTORCH_API CompileSpec {
    * / traditional TRT convection (FP32 for FP32 only, FP16 for FP32 and FP16, FP32 for Int8)
    *
    * @param shape Input tensor shape
-   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor calculation if detectable else Float32)
+   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor
+   * calculation if detectable else Float32)
    * @param format Expected tensor format for the input (Defaults to contiguous)
    */
   Input(std::vector<int64_t> shape, TensorFormat format = TensorFormat::kContiguous);
@@ -398,7 +399,8 @@ struct TRTORCH_API CompileSpec {
    * tensor format
    *
    * @param shape Input tensor shape
-   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor calculation if detectable else Float32)
+   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor
+   * calculation if detectable else Float32)
    * @param format Expected tensor format for the input (Defaults to contiguous)
    */
   Input(std::vector<int64_t> shape, DataType dtype, TensorFormat format = TensorFormat::kContiguous);
@@ -421,7 +423,8 @@ struct TRTORCH_API CompileSpec {
    * allow the user to configure expected input shape tensor format
    *
    * @param shape Input tensor shape
-   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor calculation if detectable else Float32)
+   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor
+   * calculation if detectable else Float32)
    * @param format Expected tensor format for the input (Defaults to contiguous)
    */
   Input(c10::ArrayRef<int64_t> shape, DataType dtype, TensorFormat format = TensorFormat::kContiguous);
@@ -451,7 +454,8 @@ struct TRTORCH_API CompileSpec {
   * @param min_shape Minimum shape for input tensor
   * @param opt_shape Target optimization shape for input tensor
   * @param max_shape Maximum acceptible shape for input tensor
-   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor calculation if detectable else Float32)
+   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor
+   * calculation if detectable else Float32)
    * @param format Expected tensor format for the input (Defaults to contiguous)
    */
   Input(
@@ -486,7 +490,8 @@ struct TRTORCH_API CompileSpec {
   * @param min_shape Minimum shape for input tensor
   * @param opt_shape Target optimization shape for input tensor
   * @param max_shape Maximum acceptible shape for input tensor
-   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor calculation if detectable else Float32)
+   * @param dtype Expected data type for the input (Defaults to the type of the weights in the first tensor
+   * calculation if detectable else Float32)
    * @param format Expected tensor format for the input (Defaults to contiguous)
    */
   Input(
@@ -646,13 +651,14 @@ struct TRTORCH_API CompileSpec {
   uint64_t min_block_size = 3;

   /**
-   * List of aten operators that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True
+   * List of aten operators that must be run in PyTorch. An error will be thrown if this list is not empty but
+   * ``require_full_compilation`` is True
    */
   std::vector<std::string> torch_executed_ops;

-
   /**
-   * List of modules that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True
+   * List of modules that must be run in PyTorch. An error will be thrown if this list is not empty but
+   * ``require_full_compilation`` is True
    */
   std::vector<std::string> torch_executed_modules;
 };
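
All of the re-wrapped doc comments describe the same default: when dtype is omitted, it falls back to the type of the weights in the first tensor calculation if detectable, else Float32. A hedged usage sketch of the overloads excerpted above (the constructor signatures come from this header; the include path, nested DataType enum, and constructing CompileSpec from a vector of Input are assumptions about the surrounding trtorch C++ API):

// Usage sketch; API details outside this header are assumed.
#include "trtorch/trtorch.h"

int main() {
  // Static shape with no dtype: the inferred-or-Float32 default described
  // above applies, and format defaults to contiguous.
  auto fixed = trtorch::CompileSpec::Input({1, 3, 224, 224});

  // Dynamic range overload with an explicit dtype.
  auto ranged = trtorch::CompileSpec::Input(
      {1, 3, 224, 224},   // min_shape
      {8, 3, 224, 224},   // opt_shape
      {32, 3, 224, 224},  // max_shape
      trtorch::CompileSpec::DataType::kFloat);

  trtorch::CompileSpec spec({fixed, ranged});
  return 0;
}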

cpp/src/compile_spec.cpp

Lines changed: 8 additions & 6 deletions

@@ -322,13 +322,15 @@ core::CompileSpec to_internal_compile_spec(CompileSpec external) {
   internal.convert_info.engine_settings.device.allow_gpu_fallback = external.device.allow_gpu_fallback;
   internal.convert_info.engine_settings.max_batch_size = external.max_batch_size;

-  TRTORCH_CHECK(!(external.require_full_compilation && (external.torch_executed_ops.size() > 0)),
-    "require_full_compilation is enabled however the list of ops to run in torch is not empty (Found "
-    << external.torch_executed_ops.size() << " ops)");
+  TRTORCH_CHECK(
+      !(external.require_full_compilation && (external.torch_executed_ops.size() > 0)),
+      "require_full_compilation is enabled however the list of ops to run in torch is not empty (Found "
+          << external.torch_executed_ops.size() << " ops)");

-  TRTORCH_CHECK(!(external.require_full_compilation && (external.torch_executed_modules.size() > 0)),
-    "require_full_compilation is enabled however the list of modules to run in torch is not empty (Found "
-    << external.torch_executed_modules.size() << " modules)");
+  TRTORCH_CHECK(
+      !(external.require_full_compilation && (external.torch_executed_modules.size() > 0)),
+      "require_full_compilation is enabled however the list of modules to run in torch is not empty (Found "
+          << external.torch_executed_modules.size() << " modules)");

   internal.partition_info.enabled = external.require_full_compilation;
   internal.partition_info.min_block_size = external.min_block_size;
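
Both checks stream values into their failure message, which is why clang-format gives the '<<' continuation an extra indent level. A minimal sketch of how a TRTORCH_CHECK-style macro can accept a streamed message (an assumption about the mechanism, not the library's actual macro definition):

// Sketch only: SKETCH_CHECK is a stand-in, not TRTORCH_CHECK itself.
#include <iostream>
#include <sstream>
#include <stdexcept>

#define SKETCH_CHECK(cond, msg)                  \
  do {                                           \
    if (!(cond)) {                               \
      std::ostringstream ss;                     \
      ss << msg; /* msg may contain << chains */ \
      throw std::runtime_error(ss.str());        \
    }                                            \
  } while (0)

int main() {
  size_t n_ops = 2;
  try {
    SKETCH_CHECK(
        n_ops == 0,
        "require_full_compilation is enabled however the list of ops to run in torch is not empty (Found "
            << n_ops << " ops)");
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << "\n";  // Prints the assembled message.
  }
  return 0;
}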

py/trtorch/Input.py

Lines changed: 8 additions & 8 deletions

@@ -135,30 +135,30 @@ def _to_internal(self) -> trtorch._C.Input:
         if self.shape_mode == Input._ShapeMode.DYNAMIC:
             if not Input._supported_input_size_type(self.shape["min_shape"]):
                 raise TypeError(
-                        "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
-                        + str(type(self.shape["min_shape"])) + " for min_shape")
+                    "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
+                    + str(type(self.shape["min_shape"])) + " for min_shape")
             else:
                 internal_in.min = self.shape["min_shape"]

             if not Input._supported_input_size_type(self.shape["opt_shape"]):
                 raise TypeError(
-                        "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
-                        + str(type(self.shape["opt_shape"])) + " for opt_shape")
+                    "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
+                    + str(type(self.shape["opt_shape"])) + " for opt_shape")
             else:
                 internal_in.min = self.shape["op_shape"]

             if not Input._supported_input_size_type(self.shape["max_shape"]):
                 raise TypeError(
-                        "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
-                        + str(type(self.shape["max_shape"])) + " for max_shape")
+                    "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
+                    + str(type(self.shape["max_shape"])) + " for max_shape")
             else:
                 internal_in.min = self.shape["opt_shape"]
             internal_in.input_is_dynamic = True
         else:
             if not Input._supported_input_size_type(self.shape):
                 raise TypeError(
-                        "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
-                        + str(type(self.shape)) + " for shape")
+                    "Input shape specifications for inputs are required to be a List, tuple or torch.Size, found type: "
+                    + str(type(self.shape)) + " for shape")
             else:
                 internal_in.opt = self.shape
                 internal_in.input_is_dynamic = False

py/trtorch/_compile_spec.py

Lines changed: 12 additions & 11 deletions

@@ -306,17 +306,18 @@ def TensorRTCompileSpec(inputs=[],
     compile_spec = {
         "inputs": inputs,
         "device": device,
-        "disable_tf32": disable_tf32,  # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
-        "sparse_weights": sparse_weights,  #Enable sparsity for convolution and fully connected layers.
-        "enabled_precisions": enabled_precisions,  # Enabling FP16 kernels
-        "refit": refit,  # enable refit
-        "debug": debug,  # enable debuggable engine
-        "strict_types": strict_types,  # kernels should strictly run in operating precision
-        "capability": capability,  # Restrict kernel selection to safe gpu kernels or safe dla kernels
-        "num_min_timing_iters": num_min_timing_iters,  # Number of minimization timing iterations used to select kernels
-        "num_avg_timing_iters": num_avg_timing_iters,  # Number of averaging timing iterations used to select kernels
-        "workspace_size": workspace_size,  # Maximum size of workspace given to TensorRT
-        "max_batch_size": max_batch_size,  # Maximum batch size (must be >= 1 to be set, 0 means not set)
+        "disable_tf32":
+            disable_tf32,  # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
+        "sparse_weights": sparse_weights,  #Enable sparsity for convolution and fully connected layers.
+        "enabled_precisions": enabled_precisions,  # Enabling FP16 kernels
+        "refit": refit,  # enable refit
+        "debug": debug,  # enable debuggable engine
+        "strict_types": strict_types,  # kernels should strictly run in operating precision
+        "capability": capability,  # Restrict kernel selection to safe gpu kernels or safe dla kernels
+        "num_min_timing_iters": num_min_timing_iters,  # Number of minimization timing iterations used to select kernels
+        "num_avg_timing_iters": num_avg_timing_iters,  # Number of averaging timing iterations used to select kernels
+        "workspace_size": workspace_size,  # Maximum size of workspace given to TensorRT
+        "max_batch_size": max_batch_size,  # Maximum batch size (must be >= 1 to be set, 0 means not set)
         "calibrator": calibrator,
         "truncate_long_and_double": truncate_long_and_double
     }
