Skip to content

Commit 7282337

Browse files
committed
Make developer experience better
1 parent c2f4460 commit 7282337

File tree

1 file changed: 10 additions (+), 4 deletions (−)

torchao/dtypes/affine_quantized_tensor.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,12 @@ def __repr__(self):
 # Tensor Subclass Definition #
 ##############################

+
+class QuantizedLinearNotImplementedError(NotImplementedError):
+    """ Thin wrapper around NotImplementedError to make it easier to catch this error in the dispatch table """
+    pass
+
+
 _QLINEAR_DISPATCH_TABLE = {}
 def _register_quantized_linear_dispatch(dispatch_condition, impl):
     _QLINEAR_DISPATCH_TABLE[dispatch_condition] = impl
@@ -159,7 +165,7 @@ def _quantized_linear_op(input_tensor, weight_tensor, bias):
         if dispatch_condition(input_tensor, weight_tensor, bias):
             return impl(input_tensor, weight_tensor, bias)

-    raise NotImplementedError("No specialized dispatch found for quantized linear op")
+    raise QuantizedLinearNotImplementedError("No specialized dispatch found for quantized linear op")

 def __tensor_flatten__(self):
     return ["layout_tensor"], [self.block_size, self.shape, self.quant_min, self.quant_max, self.zero_point_domain, self.dtype]
@@ -887,7 +893,7 @@ def _(func, types, args, kwargs):
     # make the branches easier to understand in `_quantized_linear_op`
     try:
         return weight_tensor._quantized_linear_op(input_tensor, weight_tensor, bias)
-    except:
+    except QuantizedLinearNotImplementedError:
         if isinstance(input_tensor, AffineQuantizedTensor):
             input_tensor = input_tensor.dequantize()
         if isinstance(weight_tensor, AffineQuantizedTensor):
@@ -910,7 +916,7 @@ def _(func, types, args, kwargs):
     try:
         weight_tensor = weight_tensor.t()
         return weight_tensor._quantized_linear_op(input_tensor, weight_tensor, bias)
-    except:
+    except QuantizedLinearNotImplementedError:
         if isinstance(input_tensor, AffineQuantizedTensor):
             input_tensor = input_tensor.dequantize()
         if isinstance(weight_tensor, AffineQuantizedTensor):
@@ -930,7 +936,7 @@ def _(func, types, args, kwargs):
     try:
         weight_tensor = weight_tensor.t()
         return weight_tensor._quantized_linear_op(input_tensor, weight_tensor, bias)
-    except:
+    except QuantizedLinearNotImplementedError:
         if isinstance(input_tensor, AffineQuantizedTensor):
             input_tensor = input_tensor.dequantize()
         if isinstance(weight_tensor, AffineQuantizedTensor):

0 commit comments

Comments
 (0)