Commit 648bad3

fix: Partial compilation translation to internal settings was incorrect
Signed-off-by: Naren Dasan <[email protected]>
1 parent 540e135 commit 648bad3

2 files changed (+5, -5 lines)


cpp/src/compile_spec.cpp (1 addition, 1 deletion)

@@ -54,7 +54,7 @@ torchtrt::core::CompileSpec to_internal_compile_spec(CompileSpec external) {
       "require_full_compilation is enabled however the list of modules to run in torch is not empty (Found "
           << external.torch_executed_modules.size() << " modules)");
 
-  internal.partition_info.enabled = external.require_full_compilation;
+  internal.partition_info.enabled = !external.require_full_compilation;
   internal.partition_info.min_block_size = external.min_block_size;
   internal.partition_info.forced_fallback_operators = std::move(external.torch_executed_ops);
   internal.lower_info.forced_fallback_modules = std::move(external.torch_executed_modules);
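
For reference, a minimal Python sketch (not the library's code) of the mapping this one-character fix restores: partitioning is the mechanism behind partial compilation, so the internal flag must be the negation of require_full_compilation. Only the field names visible in the diff above are used; the helper function itself is hypothetical.

# Hypothetical stand-in for the C++ translation in to_internal_compile_spec().
def translate_partition_settings(require_full_compilation, min_block_size,
                                 torch_executed_ops, torch_executed_modules):
    return {
        # The bug: this was `require_full_compilation` without the negation,
        # so requesting full compilation actually turned partitioning on.
        "enabled": not require_full_compilation,
        "min_block_size": min_block_size,
        "forced_fallback_operators": torch_executed_ops,
        "forced_fallback_modules": torch_executed_modules,
    }

assert translate_partition_settings(True, 3, [], [])["enabled"] is False
assert translate_partition_settings(False, 3, [], [])["enabled"] is True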

py/torch_tensorrt/ts/_compiler.py (4 additions, 4 deletions)

@@ -95,8 +95,7 @@ def compile(module: torch.jit.ScriptModule,
     spec = {
         "inputs": inputs,
         "device": device,
-        "disable_tf32":
-        disable_tf32, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
+        "disable_tf32": disable_tf32, # Force FP32 layers to use traditional FP32 format
         "sparse_weights": sparse_weights, #Enable sparsity for convolution and fully connected layers.
         "enabled_precisions": enabled_precisions, # Enabling FP16 kernels
         "refit": refit, # enable refit
@@ -111,8 +110,9 @@ def compile(module: torch.jit.ScriptModule,
         "truncate_long_and_double": truncate_long_and_double,
         "torch_fallback": {
             "enabled": not require_full_compilation,
-            "force_fallback_ops": torch_executed_ops,
-            "force_fallback_modules": torch_executed_modules
+            "forced_fallback_ops": torch_executed_ops,
+            "forced_fallback_modules": torch_executed_modules,
+            "min_block_size": min_block_size
         }
     }
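
As a usage sketch (assuming the TorchScript frontend shown above; the model, input shape, and fallback op are illustrative, not taken from the commit), the user-facing arguments now reach the internal torch_fallback block under the key names the core expects:

import torch
import torch.nn as nn
import torch_tensorrt

class TinyNet(nn.Module):  # placeholder model, not part of the commit
    def forward(self, x):
        return torch.relu(x) + 1

scripted = torch.jit.script(TinyNet().eval())

trt_mod = torch_tensorrt.ts.compile(
    scripted,
    inputs=[torch_tensorrt.Input((1, 3, 8, 8))],
    enabled_precisions={torch.float},
    disable_tf32=True,                 # keep plain FP32 math, no TF32 rounding
    require_full_compilation=False,    # partial compilation stays enabled
    min_block_size=3,                  # now forwarded as "min_block_size"
    torch_executed_ops=["aten::add"],  # forwarded as "forced_fallback_ops"
    torch_executed_modules=[],         # forwarded as "forced_fallback_modules"
)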
