Skip to content

Commit 222402e

Browse files
HolyFalafel authored and ulivne committed
[SW-177468] Removed unused code + cleanup
Change-Id: I4d27c067e87c1a30eb1da9df16a16c46d092c638
1 parent 7329e4f commit 222402e

File tree

6 files changed

+3
-29
lines changed

6 files changed

+3
-29
lines changed

neural_compressor/torch/algorithms/fp8_quant/_core/common.py

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,6 @@ def __init__(self, num_inputs, param_names, num_outputs, required_output):
5555
}
5656
descale_fcn = lambda x, scale: torch.mul(x, scale)
5757
scale_fcn = lambda x, scale: torch.div(x, scale)
58-
mat_scale_fcn = lambda x, scale_col, scale_row: torch.div(torch.div(x, scale_col), scale_row)
5958
cast_fcn = lambda x, dtype: x.to(dtype=dtype)
6059
cast_to_fp8_fcn = lambda x, dtype, scale_inv=None: torch.ops.hpu.cast_to_fp8_v2(x, scale_inv, False, False, dtype)[0]
6160
cast_from_fp8_fcn = lambda x, dtype, scale=None: torch.ops.hpu.cast_from_fp8(x, scale, dtype)
@@ -76,25 +75,6 @@ def rec_fn(x, fn):
7675
return fn(x)
7776

7877

79-
def np_to_pt(x):
80-
return rec_fn(x, lambda x: torch.tensor(x) if isinstance(x, np.ndarray) else x)
81-
82-
83-
def pt_to_np(x):
84-
return rec_fn(
85-
x,
86-
lambda x: (x.detach().cpu().float().numpy() if isinstance(x, torch.Tensor) else x),
87-
)
88-
89-
90-
def np_to_list(x):
91-
return rec_fn(x, lambda x: x.tolist() if isinstance(x, np.ndarray) else x)
92-
93-
94-
def list_to_np(x):
95-
return rec_fn(x, lambda x: np.array(x) if isinstance(x, list) else x)
96-
97-
9878
def save_json(d, fname):
9979
with open(fname, "w") as f:
10080
json.dump(d, f, indent=4)

neural_compressor/torch/algorithms/fp8_quant/_core/measure.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
ScaleMethod,
1010
MeasureExclude,
1111
get_hqt_config,
12+
set_hqt_config,
1213
)
1314
from .common import *
1415
from ..utils.logger import logger
@@ -229,11 +230,6 @@ def load_measurements(model, fname):
229230
return d
230231

231232

232-
def get_default_config(mod_list):
233-
config = {k: "default" for k in mod_list}
234-
return config
235-
236-
237233
def save_json(d, fname):
238234
with open(fname, "w") as f:
239235
json.dump(d, f, indent=4)

neural_compressor/torch/algorithms/fp8_quant/_core/scale.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import torch
22
import numpy as np
33

4-
from .._quant_common.quant_config import ScaleMethod
4+
from .._quant_common.quant_config import ScaleMethod, set_hqt_config
55
from .scale_methods import *
66
from .quant_dequant import *
77

neural_compressor/torch/algorithms/fp8_quant/_quant_common/helper_modules.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import torch.nn as nn
22
import torch
33

4-
from .quant_config import QuantMode, get_hqt_config, set_hqt_config
4+
from .quant_config import QuantMode, get_hqt_config
55

66
try: # backwards compatibility for 1.16
77
from habana_frameworks.torch.hpex.kernels import fp8_fused_sdpa

neural_compressor/torch/algorithms/fp8_quant/_quant_common/quant_config.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313

1414
local_rank = int(os.getenv("LOCAL_RANK", "-1"))
1515
world_size = int(os.getenv("WORLD_SIZE", "-1"))
16-
global_rank = int(os.getenv("RANK", "-1"))
1716

1817

1918
class QuantMode(Enum):

test/3x/torch/algorithms/fp8_quant/unit_tests/test_deepspeed.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import os
21
import typing
32

43
import pytest

0 commit comments

Comments (0)