
Commit f172c47

Fix f-string typo (#441)
Summary: att
Test Plan: .
Reviewers:
Subscribers:
Tasks:
Tags:
1 parent 34b5c9c commit f172c47

File tree

1 file changed: +5 -5 lines changed


torchao/quantization/utils.py

Lines changed: 5 additions & 5 deletions
@@ -124,9 +124,9 @@ def cuda(self):
 
 def guard_dtype_size(tensor_arg, arg_name, dtype=None, size=None):
     if dtype is not None and tensor_arg.dtype != dtype:
-        raise ValueError("Expected Tensor argument {arg_name} to have dtype {dtype}, but got {tensor_arg.dtype} instead.")
+        raise ValueError(f"Expected Tensor argument {arg_name} to have dtype {dtype}, but got {tensor_arg.dtype} instead.")
     if size is not None and tensor_arg.size() != size:
-        raise ValueError("Expected Tensor argument {arg_name} to have size {size}, but got {tensor_arg.size()} instead.")
+        raise ValueError(f"Expected Tensor argument {arg_name} to have size {size}, but got {tensor_arg.size()} instead.")
 
 # taken from
 # https://github.com/mit-han-lab/smoothquant/blob/2f87951dacfb9238d8d657f52ae83a82a3c9ba0c/smoothquant/fake_quant.py#L26
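
The first hunk adds the missing f prefix: without it, Python treats "{arg_name}" and friends as literal text rather than interpolated expressions, so the error message printed the placeholders verbatim. A minimal sketch of the fixed behavior; the function body is copied from the patched file above, while the call site, the name "x", and the torch.zeros(2) tensor are illustrative, not from the commit:

import torch

# guard_dtype_size as it reads after this commit (copied from the diff above).
def guard_dtype_size(tensor_arg, arg_name, dtype=None, size=None):
    if dtype is not None and tensor_arg.dtype != dtype:
        raise ValueError(f"Expected Tensor argument {arg_name} to have dtype {dtype}, but got {tensor_arg.dtype} instead.")
    if size is not None and tensor_arg.size() != size:
        raise ValueError(f"Expected Tensor argument {arg_name} to have size {size}, but got {tensor_arg.size()} instead.")

# Illustrative call with a deliberately wrong dtype.
try:
    guard_dtype_size(torch.zeros(2), "x", dtype=torch.int8)
except ValueError as e:
    # With the f prefix the message interpolates the real values:
    #   Expected Tensor argument x to have dtype torch.int8, but got torch.float32 instead.
    # Before the fix it printed the literal text "{arg_name}", "{dtype}", etc.
    print(e)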
@@ -464,13 +464,13 @@ def recommended_inductor_config_setter():
         coordinate_descent_tuning = True
         coordinate_descent_check_all_directions = True
         force_fuse_int_mm_with_mul = True
-        fx_graph_cache = True
+        fx_graph_cache = True
         triton.unique_kernel_names = True
         torch.set_float32_matmul_precision("high")
     """
     torch._inductor.config.coordinate_descent_tuning = True
     torch._inductor.config.coordinate_descent_check_all_directions = True
-    torch._inductor.config.force_fuse_int_mm_with_mul = True
-    torch._inductor.config.fx_graph_cache = True
+    torch._inductor.config.force_fuse_int_mm_with_mul = True
+    torch._inductor.config.fx_graph_cache = True
     torch._inductor.config.triton.unique_kernel_names = True
     torch.set_float32_matmul_precision("high")
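
In this second hunk the before and after text of lines 467, 473, and 474 is identical, so the change there appears to be whitespace-only (indentation or trailing spaces, which the rendering above cannot show; the exact whitespace is not recoverable here). For context, a hedged usage sketch of recommended_inductor_config_setter, which is defined in this file at this commit and applies the inductor settings listed in its docstring; the Linear model and the input tensor are illustrative, and the sketch assumes a working torch/torchao install:

import torch
from torchao.quantization.utils import recommended_inductor_config_setter

# Applies the settings shown in the diff above, including:
#   torch._inductor.config.coordinate_descent_tuning = True
#   torch._inductor.config.force_fuse_int_mm_with_mul = True
#   torch._inductor.config.fx_graph_cache = True
#   torch.set_float32_matmul_precision("high")
recommended_inductor_config_setter()

# Illustrative follow-up: compile a model with the recommended config in place.
model = torch.nn.Linear(16, 16)
compiled = torch.compile(model)
out = compiled(torch.randn(1, 16))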
