
Commit 83bc779

Fix logger (#1583)
Signed-off-by: yiliu30 <[email protected]>
Signed-off-by: chensuyue <[email protected]>

1 parent fb61428 · commit 83bc779

File tree

12 files changed, +175 -161 lines changed


.azure-pipelines/scripts/ut/3x/run_3x_pt.sh

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ fi

 # check pytest issue
 if [ -s run_pytest.sh ]; then
-    if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c 'passed,' ${ut_log_name}) == 0 ]; then
+    if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed ' ${ut_log_name}) == 0 ]; then
         echo "Find errors in pytest case, please check the output..."
         echo "Please search for '== FAILURES ==' or '== ERRORS =='"
         ut_status="failed"
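The stricter pattern also recognizes a fully green run: when every test passes with no trailing counts, pytest's summary line reads like `== 10 passed in 3.21s ==`, which contains ` passed ` but not `passed,`, so the old pattern could flag an all-pass log as failed. A minimal sketch of the two substring tests (the summary line is an assumed example, not taken from this CI log):

```python
# Assumed pytest summary line for a run where every test passes.
summary = "========== 10 passed in 3.21s =========="

assert " passed " in summary      # new pattern: matches the green run
assert "passed," not in summary   # old pattern: no match, run was misreported as failed
```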

docs/source/adaptor.md

Lines changed: 5 additions & 10 deletions
@@ -44,22 +44,17 @@ For example, a user can implement an `Abc` adaptor like below:
 ```python
 @adaptor_registry
 class AbcAdaptor(Adaptor):
-    def __init__(self, framework_specific_info):
-        ...
+    def __init__(self, framework_specific_info): ...

-    def quantize(self, tune_cfg, model, dataloader, q_func=None):
-        ...
+    def quantize(self, tune_cfg, model, dataloader, q_func=None): ...

     def evaluate(
         self, model, dataloader, postprocess=None, metric=None, measurer=None, iteration=-1, tensorboard=False
-    ):
-        ...
+    ): ...

-    def query_fw_capability(self, model):
-        ...
+    def query_fw_capability(self, model): ...

-    def query_fused_patterns(self, model):
-        ...
+    def query_fused_patterns(self, model): ...
 ```

 * `quantize` function is used to perform quantization for post-training quantization and quantization-aware training. Quantization processing includes calibration and conversion processing for post-training quantization, while for quantization-aware training, it includes training and conversion processing.
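As a rough illustration of that contract, here is a hedged sketch of a minimal `quantize` body; the signature comes from the `AbcAdaptor` stub above, while the body (the `q_func` branch and the calibration loop) is illustrative rather than the library's actual implementation:

```python
# Illustrative only: real adaptors dispatch to framework-specific
# calibration and conversion routines.
def quantize(self, tune_cfg, model, dataloader, q_func=None):
    if q_func is not None:
        # Quantization-aware training: run the user-supplied training function.
        q_func(model)
    else:
        # Post-training quantization: feed calibration batches through the model.
        for inputs in dataloader:
            model(inputs)
    # Convert the calibrated/trained model according to tune_cfg and return it.
    return model
```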

docs/source/migration.md

Lines changed: 6 additions & 12 deletions
@@ -41,8 +41,7 @@ val_dataloader = torch.utils.data.Dataloader(
 )


-def eval_func(model):
-    ...
+def eval_func(model): ...


 # Quantization code
@@ -115,8 +114,7 @@ val_dataloader = torch.utils.data.Dataloader(
 )


-def eval_func(model):
-    ...
+def eval_func(model): ...


 # Quantization code
@@ -147,12 +145,10 @@ model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)


-def eval_func(model):
-    ...
+def eval_func(model): ...


-def train_func(model):
-    ...
+def train_func(model): ...


 trainer = Trainer(...)
@@ -213,12 +209,10 @@ model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)


-def eval_func(model):
-    ...
+def eval_func(model): ...


-def train_func(model):
-    ...
+def train_func(model): ...


 trainer = Trainer(...)

docs/source/pruning.md

Lines changed: 1 addition & 0 deletions
@@ -342,6 +342,7 @@ The following section exemplifies how to use hooks in user pass-in training func
     on_after_optimizer_step()  # Update weights' criteria, mask weights
     on_train_end()             # End of pruner, print sparse information
 """
+
 from neural_compressor.training import prepare_compression, WeightPruningConfig

 config = WeightPruningConfig(configs)
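The hooks listed in that docstring map onto the callbacks object returned by `prepare_compression`. A hedged sketch of the surrounding training loop, assuming the callback names documented for the pruning API (`model`, `configs`, `optimizer`, `train_dataloader`, and `num_epochs` are assumed, not part of this diff):

```python
from neural_compressor.training import prepare_compression, WeightPruningConfig

config = WeightPruningConfig(configs)
compression_manager = prepare_compression(model, config)

compression_manager.callbacks.on_train_begin()
for epoch in range(num_epochs):
    for step, batch in enumerate(train_dataloader):
        compression_manager.callbacks.on_step_begin(step)
        loss = model(**batch).loss
        loss.backward()
        compression_manager.callbacks.on_before_optimizer_step()
        optimizer.step()
        compression_manager.callbacks.on_after_optimizer_step()  # mask weights
        optimizer.zero_grad()
        compression_manager.callbacks.on_step_end()
compression_manager.callbacks.on_train_end()  # print sparse information
```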

docs/source/quantization.md

Lines changed: 1 addition & 2 deletions
@@ -319,8 +319,7 @@ criterion = ...


 # Quantization code
-def train_func(model):
-    ...
+def train_func(model): ...


 from neural_compressor import QuantizationAwareTrainingConfig

docs/source/tuning_strategies.md

Lines changed: 1 addition & 2 deletions
@@ -507,8 +507,7 @@ For example, user can implement an `Abc` strategy like below:
 ```python
 @strategy_registry
 class AbcTuneStrategy(TuneStrategy):
-    def __init__(self, model, conf, q_dataloader, q_func=None, eval_dataloader=None, eval_func=None, dicts=None):
-        ...
+    def __init__(self, model, conf, q_dataloader, q_func=None, eval_dataloader=None, eval_func=None, dicts=None): ...

     def next_tune_cfg(self):
         # generate the next tuning config

neural_compressor/common/__init__.py

Lines changed: 5 additions & 21 deletions
@@ -14,37 +14,21 @@

 from neural_compressor.common.utils import (
     level,
-    log,
-    info,
-    DEBUG,
-    debug,
-    warn,
-    warning,
-    error,
-    fatal,
+    logger,
+    Logger,
     set_random_seed,
-    set_workspace,
     set_resume_from,
+    set_workspace,
     set_tensorboard,
-    Logger,
-    logger,
 )
 from neural_compressor.common.base_config import options


 __all__ = [
-    "level",
-    "log",
-    "info",
-    "DEBUG",
-    "debug",
-    "warn",
-    "warning",
-    "error",
-    "fatal",
     "options",
-    "Logger",
+    "level",
     "logger",
+    "Logger",
     "set_workspace",
     "set_random_seed",
     "set_resume_from",

neural_compressor/common/utils/logger.py

Lines changed: 76 additions & 78 deletions
@@ -19,6 +19,28 @@
 import logging
 import os

+__all__ = [
+    "level",
+    "Logger",  # TODO: not expose it
+    "logger",
+]
+
+
+def _pretty_dict(value, indent=0):
+    """Make the logger dict pretty."""
+    prefix = "\n" + " " * (indent + 4)
+    if isinstance(value, dict):
+        items = [prefix + repr(key) + ": " + _pretty_dict(value[key], indent + 4) for key in value]
+        return "{%s}" % (",".join(items) + "\n" + " " * indent)
+    elif isinstance(value, list):
+        items = [prefix + _pretty_dict(item, indent + 4) for item in value]
+        return "[%s]" % (",".join(items) + "\n" + " " * indent)
+    elif isinstance(value, tuple):
+        items = [prefix + _pretty_dict(item, indent + 4) for item in value]
+        return "(%s)" % (",".join(items) + "\n" + " " * indent)
+    else:
+        return repr(value)
+

 class Logger(object):
     """Logger class."""
@@ -50,85 +72,61 @@ def get_logger(self):
         """Get the logger."""
         return self._logger

-
-def _pretty_dict(value, indent=0):
-    """Make the logger dict pretty."""
-    prefix = "\n" + " " * (indent + 4)
-    if isinstance(value, dict):
-        items = [prefix + repr(key) + ": " + _pretty_dict(value[key], indent + 4) for key in value]
-        return "{%s}" % (",".join(items) + "\n" + " " * indent)
-    elif isinstance(value, list):
-        items = [prefix + _pretty_dict(item, indent + 4) for item in value]
-        return "[%s]" % (",".join(items) + "\n" + " " * indent)
-    elif isinstance(value, tuple):
-        items = [prefix + _pretty_dict(item, indent + 4) for item in value]
-        return "(%s)" % (",".join(items) + "\n" + " " * indent)
-    else:
-        return repr(value)
+    @staticmethod
+    def log(level, msg, *args, **kwargs):
+        """Output log with the level as a parameter."""
+        if isinstance(msg, dict):
+            for _, line in enumerate(_pretty_dict(msg).split("\n")):
+                Logger().get_logger().log(level, line, *args, **kwargs, stacklevel=2)
+        else:
+            Logger().get_logger().log(level, msg, *args, **kwargs, stacklevel=2)
+
+    @staticmethod
+    def debug(msg, *args, **kwargs):
+        """Output log with the debug level."""
+        if isinstance(msg, dict):
+            for _, line in enumerate(_pretty_dict(msg).split("\n")):
+                Logger().get_logger().debug(line, *args, **kwargs, stacklevel=2)
+        else:
+            Logger().get_logger().debug(msg, *args, **kwargs, stacklevel=2)
+
+    @staticmethod
+    def error(msg, *args, **kwargs):
+        """Output log with the error level."""
+        if isinstance(msg, dict):
+            for _, line in enumerate(_pretty_dict(msg).split("\n")):
+                Logger().get_logger().error(line, *args, **kwargs, stacklevel=2)
+        else:
+            Logger().get_logger().error(msg, *args, **kwargs, stacklevel=2)
+
+    @staticmethod
+    def fatal(msg, *args, **kwargs):
+        """Output log with the fatal level."""
+        if isinstance(msg, dict):
+            for _, line in enumerate(_pretty_dict(msg).split("\n")):
+                Logger().get_logger().fatal(line, *args, **kwargs, stacklevel=2)
+        else:
+            Logger().get_logger().fatal(msg, *args, **kwargs, stacklevel=2)
+
+    @staticmethod
+    def info(msg, *args, **kwargs):
+        """Output log with the info level."""
+        if isinstance(msg, dict):
+            for _, line in enumerate(_pretty_dict(msg).split("\n")):
+                Logger().get_logger().info(line, *args, **kwargs, stacklevel=2)
+        else:
+            Logger().get_logger().info(msg, *args, **kwargs, stacklevel=2)
+
+    @staticmethod
+    def warning(msg, *args, **kwargs):
+        """Output log with the warning level (Alias of the method warn)."""
+        if isinstance(msg, dict):
+            for _, line in enumerate(_pretty_dict(msg).split("\n")):
+                Logger().get_logger().warning(line, *args, **kwargs, stacklevel=2)
+        else:
+            Logger().get_logger().warning(msg, *args, **kwargs, stacklevel=2)


 level = Logger().get_logger().level
-DEBUG = logging.DEBUG
-
-
-def log(level, msg, *args, **kwargs):
-    """Output log with the level as a parameter."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().log(level, line, *args, **kwargs)
-    else:
-        Logger().get_logger().log(level, msg, *args, **kwargs)
-
-
-def debug(msg, *args, **kwargs):
-    """Output log with the debug level."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().debug(line, *args, **kwargs)
-    else:
-        Logger().get_logger().debug(msg, *args, **kwargs)
-

-def error(msg, *args, **kwargs):
-    """Output log with the error level."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().error(line, *args, **kwargs)
-    else:
-        Logger().get_logger().error(msg, *args, **kwargs)
-
-
-def fatal(msg, *args, **kwargs):
-    """Output log with the fatal level."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().fatal(line, *args, **kwargs)
-    else:
-        Logger().get_logger().fatal(msg, *args, **kwargs)
-
-
-def info(msg, *args, **kwargs):
-    """Output log with the info level."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().info(line, *args, **kwargs)
-    else:
-        Logger().get_logger().info(msg, *args, **kwargs)
-
-
-def warn(msg, *args, **kwargs):
-    """Output log with the warning level."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().warning(line, *args, **kwargs)
-    else:
-        Logger().get_logger().warning(msg, *args, **kwargs)
-
-
-def warning(msg, *args, **kwargs):
-    """Output log with the warning level (Alias of the method warn)."""
-    if isinstance(msg, dict):
-        for _, line in enumerate(_pretty_dict(msg).split("\n")):
-            Logger().get_logger().warning(line, *args, **kwargs)
-    else:
-        Logger().get_logger().warning(msg, *args, **kwargs)
+logger = Logger
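Two details of the rewrite are worth calling out. First, the free functions become `@staticmethod`s on `Logger`, and the `logger = Logger` alias keeps the familiar `logger.info(...)` spelling working via class-attribute access. Second, every forwarding call now passes `stacklevel=2`, which the standard-library `logging` module (Python 3.8+) uses to attribute the emitted record to the wrapper's caller rather than to `logger.py` itself. A small usage sketch (the messages are made up):

```python
from neural_compressor.common import logger

# The record's filename/lineno point at this call site, not at logger.py,
# because the static methods forward stacklevel=2 to the underlying logger.
logger.info("quantization started")

# Dict messages are pretty-printed by _pretty_dict and logged line by line.
logger.debug({"approach": "weight_only", "scheme": {"bits": 4, "group_size": 32}})
```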

neural_compressor/compression/pruner/README.md

Lines changed: 1 addition & 0 deletions
@@ -343,6 +343,7 @@ The following section exemplifies how to use hooks in user pass-in training func
     on_after_optimizer_step()  # Update weights' criteria, mask weights
     on_train_end()             # End of pruner, print sparse information
 """
+
 from neural_compressor.training import prepare_compression, WeightPruningConfig

 config = WeightPruningConfig(configs)

neural_compressor/torch/quantization/modules.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
 from torch.autograd import Function
 from torch.nn import functional as F

-from neural_compressor.common import DEBUG, level, logger
+from neural_compressor.common import logger
 from neural_compressor.torch.algorithms.weight_only import quant_tensor