2 files changed: +5 −11 lines

test/quantization/quantize_/workflows/int4

```diff
     torch_version_at_least,
 )
 
-try:
-    import torch_npu
-except ImportError:
-    torch_npu = None
-
 
 def get_config(group_size):
     return Int4WeightOnlyConfig(
@@ -40,11 +35,10 @@ def get_config(group_size):
 
 
 @unittest.skipIf(not torch_version_at_least("2.7.1"), "Need pytorch 2.7.1+")
-@unittest.skipIf(torch_npu is None, "torch_npu is not available")
-@unittest.skipIf(not torch_npu.npu.is_available(), "NPU not available")
 @unittest.skipIf(
-    version.parse(torch_npu.__version__) < version.parse("2.7.1rc1"),
-    "Need torch_npu 2.7.1rc1+",
+    torch.accelerator.current_accelerator(True).type == "npu"
+    and torch.accelerator.is_available(),
+    "NPU not available",
 )
 class Int4PlainInt32TensorNPU(TestCase):
```
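
For reference, `unittest.skipIf` skips a test when its condition is `True`, and `torch.accelerator.current_accelerator(True)` returns `None` when no accelerator is available. The new condition is `True` exactly when an available NPU is the current accelerator, so as written it appears to skip the class on NPU machines rather than on machines without one, and the `.type` access can fail when no accelerator is present. A guard that skips only when no NPU is available could look like the sketch below (a minimal sketch assuming PyTorch 2.6+'s `torch.accelerator` API; `npu_is_available` is a hypothetical helper, not part of this PR):

```python
import unittest

import torch


def npu_is_available() -> bool:
    # Hypothetical helper (not part of the PR): True only when an accelerator
    # is usable and that accelerator is an Ascend NPU.
    if not torch.accelerator.is_available():
        return False
    accelerator = torch.accelerator.current_accelerator(True)  # None if unavailable
    return accelerator is not None and accelerator.type == "npu"


# unittest.skipIf skips when the condition is True, so negate the availability
# check to skip only on machines *without* an NPU.
@unittest.skipIf(not npu_is_available(), "NPU not available")
class Int4PlainInt32TensorNPU(unittest.TestCase):
    ...
```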

torchao/quantization/quantize_/workflows/int4

```diff
@@ -128,11 +128,11 @@ def from_hp(
         )
 
         assert int_data.dtype == torch.int32, (
-            f"torch_npu.npu_convert_weight_to_int4pack expects `int32` dtype"
+            f"torch.ops.npu.npu_convert_weight_to_int4pack expects `int32` dtype"
         )
 
         assert int_data.shape[-1] % 8 == 0, (
-            f"torch_npu.npu_convert_weight_to_int4pack expects last dim must be aligned to 8,but got {int_data.shape[-1]}"
+            f"torch.ops.npu.npu_convert_weight_to_int4pack expects last dim must be aligned to 8,but got {int_data.shape[-1]}"
         )
 
         packed_weight = torch.ops.npu.npu_convert_weight_to_int4pack(
```
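
The two assertions encode the preconditions of the Ascend packing op: the quantized data must already be `int32`, and its last dimension must be a multiple of 8 (presumably eight 4-bit values per packed `int32`). Below is a minimal, NPU-free sketch of the same checks; the helper name and the example tensor are illustrative only, and the actual `torch.ops.npu.npu_convert_weight_to_int4pack` call is reachable only where `torch_npu` has registered the `npu` op namespace:

```python
import torch


def check_int4pack_preconditions(int_data: torch.Tensor) -> None:
    # Mirrors the two assertions in from_hp shown above.
    assert int_data.dtype == torch.int32, (
        "torch.ops.npu.npu_convert_weight_to_int4pack expects `int32` dtype"
    )
    assert int_data.shape[-1] % 8 == 0, (
        "torch.ops.npu.npu_convert_weight_to_int4pack expects the last dim to be "
        f"aligned to 8, but got {int_data.shape[-1]}"
    )


# Example: a [64, 128] int32 tensor satisfies both preconditions.
check_int4pack_preconditions(torch.zeros(64, 128, dtype=torch.int32))
```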