
Commit 9002937

[fix] Be able to display error messages in additional info as it is (#225)
* [feat] Be able to display the additional info as it is
* [fix] Fix dict_repr to be able to detect None
* [fix] Fix a phrase
1 parent 3f51d8b commit 9002937

6 files changed (+53, -46 lines)

autoPyTorch/api/base_task.py

Lines changed: 26 additions & 34 deletions
@@ -47,7 +47,7 @@
 from autoPyTorch.pipeline.components.setup.traditional_ml.traditional_learner import get_available_traditional_learners
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
 from autoPyTorch.pipeline.components.training.metrics.utils import calculate_score, get_metrics
-from autoPyTorch.utils.common import FitRequirement, replace_string_bool_to_bool
+from autoPyTorch.utils.common import FitRequirement, dict_repr, replace_string_bool_to_bool
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 from autoPyTorch.utils.logging_ import (
     PicklableClientLogger,
@@ -525,39 +525,28 @@ def _do_dummy_prediction(self) -> None:
             all_supported_metrics=self._all_supported_metrics
         )

-        status, cost, runtime, additional_info = ta.run(num_run, cutoff=self._time_for_task)
+        status, _, _, additional_info = ta.run(num_run, cutoff=self._time_for_task)
         if status == StatusType.SUCCESS:
             self._logger.info("Finished creating dummy predictions.")
         else:
             if additional_info.get('exitcode') == -6:
-                self._logger.error(
-                    "Dummy prediction failed with run state %s. "
-                    "The error suggests that the provided memory limits were too tight. Please "
-                    "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                    "problem, please open an issue and paste the additional output. "
-                    "Additional output: %s.",
-                    str(status), str(additional_info),
-                )
+                err_msg = "Dummy prediction failed with run state {},\n" \
+                          "because the provided memory limits were too tight.\n" \
+                          "Please increase the 'ml_memory_limit' and try again.\n" \
+                          "If the problem persists, please open an issue and\n" \
+                          "paste the additional info.\n" \
+                          "Additional info:\n{}".format(str(status), dict_repr(additional_info))
+                self._logger.error(err_msg)
                 # Fail if dummy prediction fails.
-                raise ValueError(
-                    "Dummy prediction failed with run state %s. "
-                    "The error suggests that the provided memory limits were too tight. Please "
-                    "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                    "problem, please open an issue and paste the additional output. "
-                    "Additional output: %s." %
-                    (str(status), str(additional_info)),
-                )
+                raise ValueError(err_msg)

             else:
-                self._logger.error(
-                    "Dummy prediction failed with run state %s and additional output: %s.",
-                    str(status), str(additional_info),
+                err_msg = "Dummy prediction failed with run state {} and additional info:\n{}".format(
+                    str(status), dict_repr(additional_info)
                 )
+                self._logger.error(err_msg)
                 # Fail if dummy prediction fails.
-                raise ValueError(
-                    "Dummy prediction failed with run state %s and additional output: %s."
-                    % (str(status), str(additional_info))
-                )
+                raise ValueError(err_msg)

     def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs: int) -> None:
         """
@@ -652,7 +641,9 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
                 status, cost, runtime, additional_info = future.result()
                 if status == StatusType.SUCCESS:
                     self._logger.info(
-                        f"Fitting {cls} took {runtime}s, performance:{cost}/{additional_info}")
+                        "Fitting {} took {} [sec] and got performance: {}.\n"
+                        "Additional info:\n{}".format(cls, runtime, cost, dict_repr(additional_info))
+                    )
                     configuration = additional_info['pipeline_configuration']
                     origin = additional_info['configuration_origin']
                     additional_info.pop('pipeline_configuration')
@@ -663,17 +654,18 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
                 else:
                     if additional_info.get('exitcode') == -6:
                         self._logger.error(
-                            "Traditional prediction for %s failed with run state %s. "
-                            "The error suggests that the provided memory limits were too tight. Please "
-                            "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                            "problem, please open an issue and paste the additional output. "
-                            "Additional output: %s.",
-                            cls, str(status), str(additional_info),
+                            "Traditional prediction for {} failed with run state {},\n"
+                            "because the provided memory limits were too tight.\n"
+                            "Please increase the 'ml_memory_limit' and try again.\n"
+                            "If the problem persists, please open an issue\n"
+                            "and paste the additional info.\n"
+                            "Additional info:\n{}".format(cls, str(status), dict_repr(additional_info))
                         )
                     else:
                         self._logger.error(
-                            "Traditional prediction for %s failed with run state %s and additional output: %s.",
-                            cls, str(status), str(additional_info),
+                            "Traditional prediction for {} failed with run state {}.\nAdditional info:\n{}".format(
+                                cls, str(status), dict_repr(additional_info)
+                            )
                         )

                 # In the case of a serial execution, calling submit halts the run for a resource
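
Note that the rewrite above also deduplicates the failure text: the message is composed once into err_msg and reused for both the log record and the raised ValueError, so the two can no longer drift apart as the old duplicated string literals could. A minimal sketch of that pattern (the fail_with_info helper and logger name below are illustrative, not part of the codebase):

import logging

logger = logging.getLogger("autoPyTorch-example")  # illustrative logger name

def fail_with_info(status: str, additional_info: dict) -> None:
    # Compose the message once, rendering the info dict one key per line,
    # then log it and raise it with identical text.
    err_msg = "Dummy prediction failed with run state {}.\nAdditional info:\n{}".format(
        status, "\n".join("{}: {}".format(k, v) for k, v in additional_info.items())
    )
    logger.error(err_msg)
    raise ValueError(err_msg)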

autoPyTorch/evaluation/abstract_evaluator.py

Lines changed: 2 additions & 2 deletions
@@ -42,7 +42,7 @@
     calculate_loss,
     get_metrics,
 )
-from autoPyTorch.utils.common import subsampler
+from autoPyTorch.utils.common import dict_repr, subsampler
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 from autoPyTorch.utils.logging_ import PicklableClientLogger, get_named_client_logger
 from autoPyTorch.utils.pipeline import get_dataset_requirements
@@ -537,7 +537,7 @@ def __init__(self, backend: Backend,
         self.Y_actual_train: Optional[np.ndarray] = None
         self.pipelines: Optional[List[BaseEstimator]] = None
         self.pipeline: Optional[BaseEstimator] = None
-        self.logger.debug("Fit dictionary in Abstract evaluator: {}".format(self.fit_dictionary))
+        self.logger.debug("Fit dictionary in Abstract evaluator: {}".format(dict_repr(self.fit_dictionary)))
         self.logger.debug("Search space updates :{}".format(self.search_space_updates))

     def _get_pipeline(self) -> BaseEstimator:

autoPyTorch/evaluation/tae.py

Lines changed: 10 additions & 3 deletions
@@ -26,7 +26,7 @@
 from autoPyTorch.automl_common.common.utils.backend import Backend
 from autoPyTorch.evaluation.utils import empty_queue, extract_learning_curve, read_queue
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
-from autoPyTorch.utils.common import replace_string_bool_to_bool
+from autoPyTorch.utils.common import dict_repr, replace_string_bool_to_bool
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 from autoPyTorch.utils.logging_ import PicklableClientLogger, get_named_client_logger
 from autoPyTorch.utils.parallel import preload_modules
@@ -459,7 +459,14 @@ def run(

         empty_queue(queue)
         self.logger.debug(
-            'Finished function evaluation %s. Status: %s, Cost: %f, Runtime: %f, Additional %s',
-            str(num_run), status, cost, runtime, additional_run_info,
+            "Finished function evaluation {}.\n"
+            "Status: {}, Cost: {}, Runtime: {},\n"
+            "Additional information:\n{}".format(
+                str(num_run),
+                status,
+                cost,
+                runtime,
+                dict_repr(additional_run_info)
+            )
         )
         return status, cost, runtime, additional_run_info

autoPyTorch/evaluation/train_evaluator.py

Lines changed: 6 additions & 6 deletions
@@ -19,7 +19,7 @@
     fit_and_suppress_warnings
 )
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
-from autoPyTorch.utils.common import subsampler
+from autoPyTorch.utils.common import dict_repr, subsampler
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates

 __all__ = ['TrainEvaluator', 'eval_function']
@@ -172,11 +172,11 @@ def fit_predict_and_loss(self) -> None:

         status = StatusType.SUCCESS

-        self.logger.debug("In train evaluator fit_predict_and_loss, num_run: {} loss:{},"
-                          " additional run info:{}, status: {}".format(self.num_run,
-                                                                       loss,
-                                                                       additional_run_info,
-                                                                       status))
+        self.logger.debug("In train evaluator.fit_predict_and_loss, num_run: {} loss: {},"
+                          " status: {},\nadditional run info:\n{}".format(self.num_run,
+                                                                          loss,
+                                                                          status,
+                                                                          dict_repr(additional_run_info)))
         self.finish_up(
             loss=loss,
             train_loss=train_loss,

autoPyTorch/optimizer/smbo.py

Lines changed: 1 addition & 1 deletion
@@ -300,7 +300,7 @@ def run_smbo(self, func: typing.Optional[typing.Callable] = None
             pynisher_context=self.pynisher_context,
         )
         ta = ExecuteTaFuncWithQueue
-        self.logger.info("Created TA")
+        self.logger.info("Finished creating the Target Algorithm (TA) function")

         startup_time = self.watcher.wall_elapsed(self.dataset_name)
         total_walltime_limit = self.total_walltime_limit - startup_time - 5

autoPyTorch/utils/common.py

Lines changed: 8 additions & 0 deletions
@@ -102,6 +102,14 @@ def custom_collate_fn(batch: List) -> List[Optional[torch.tensor]]:
     return items


+def dict_repr(d: Optional[Dict[Any, Any]]) -> str:
+    """Display long messages in a dict as they are, one key per line."""
+    if isinstance(d, dict):
+        return "\n".join(["{}: {}".format(k, v) for k, v in d.items()])
+    else:
+        return "None"
+
+
 def replace_string_bool_to_bool(dictionary: Dict[str, Any]) -> Dict[str, Any]:
     """
     Utility function to replace string-type bool to
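
For reference, a minimal sketch of what the new dict_repr helper produces compared to str(); the info dict below is hypothetical, for illustration only:

from typing import Any, Dict, Optional

def dict_repr(d: Optional[Dict[Any, Any]]) -> str:
    """Display long messages in a dict as they are, one key per line."""
    if isinstance(d, dict):
        return "\n".join(["{}: {}".format(k, v) for k, v in d.items()])
    else:
        return "None"

# Hypothetical additional-info dict, for illustration only.
info = {'exitcode': -6, 'error': 'MemoryError'}

print(str(info))        # single line: {'exitcode': -6, 'error': 'MemoryError'}
print(dict_repr(info))  # exitcode: -6
                        # error: MemoryError
print(dict_repr(None))  # None (the isinstance check catches the None case)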
