from autoPyTorch.pipeline.components.setup.traditional_ml.traditional_learner import get_available_traditional_learners
from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
from autoPyTorch.pipeline.components.training.metrics.utils import calculate_score, get_metrics
-from autoPyTorch.utils.common import FitRequirement, replace_string_bool_to_bool
+from autoPyTorch.utils.common import FitRequirement, dict_repr, replace_string_bool_to_bool
from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
from autoPyTorch.utils.logging_ import (
    PicklableClientLogger,
@@ -525,39 +525,28 @@ def _do_dummy_prediction(self) -> None:
525525 all_supported_metrics = self ._all_supported_metrics
526526 )
527527
-        status, cost, runtime, additional_info = ta.run(num_run, cutoff=self._time_for_task)
+        status, _, _, additional_info = ta.run(num_run, cutoff=self._time_for_task)
529529 if status == StatusType .SUCCESS :
530530 self ._logger .info ("Finished creating dummy predictions." )
531531 else :
532532 if additional_info .get ('exitcode' ) == - 6 :
-                self._logger.error(
-                    "Dummy prediction failed with run state %s. "
-                    "The error suggests that the provided memory limits were too tight. Please "
-                    "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                    "problem, please open an issue and paste the additional output. "
-                    "Additional output: %s.",
-                    str(status), str(additional_info),
-                )
+                err_msg = "Dummy prediction failed with run state {},\n" \
+                          "because the provided memory limits were too tight.\n" \
+                          "Please increase the 'ml_memory_limit' and try again.\n" \
+                          "If you still get the problem, please open an issue and\n" \
+                          "paste the additional info.\n" \
+                          "Additional info:\n{}.".format(str(status), dict_repr(additional_info))
+                self._logger.error(err_msg)
                # Fail if dummy prediction fails.
-                raise ValueError(
-                    "Dummy prediction failed with run state %s. "
-                    "The error suggests that the provided memory limits were too tight. Please "
-                    "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                    "problem, please open an issue and paste the additional output. "
-                    "Additional output: %s." %
-                    (str(status), str(additional_info)),
-                )
+                raise ValueError(err_msg)
550542
            else:
-                self._logger.error(
-                    "Dummy prediction failed with run state %s and additional output: %s.",
-                    str(status), str(additional_info),
+                err_msg = "Dummy prediction failed with run state {} and additional info:\n{}.".format(
+                    str(status), dict_repr(additional_info)
                )
+                self._logger.error(err_msg)
                # Fail if dummy prediction fails.
-                raise ValueError(
-                    "Dummy prediction failed with run state %s and additional output: %s."
-                    % (str(status), str(additional_info))
-                )
+                raise ValueError(err_msg)
561550
562551 def _do_traditional_prediction (self , time_left : int , func_eval_time_limit_secs : int ) -> None :
563552 """
@@ -652,7 +641,9 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
                    status, cost, runtime, additional_info = future.result()
                    if status == StatusType.SUCCESS:
                        self._logger.info(
-                            f"Fitting {cls} took {runtime}s, performance:{cost}/{additional_info}")
+                            "Fitting {} took {} [sec] and got performance: {}.\n"
+                            "additional info:\n{}".format(cls, runtime, cost, dict_repr(additional_info))
+                        )
656647 configuration = additional_info ['pipeline_configuration' ]
657648 origin = additional_info ['configuration_origin' ]
658649 additional_info .pop ('pipeline_configuration' )
@@ -663,17 +654,18 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
                    else:
                        if additional_info.get('exitcode') == -6:
                            self._logger.error(
-                                "Traditional prediction for %s failed with run state %s. "
-                                "The error suggests that the provided memory limits were too tight. Please "
-                                "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                                "problem, please open an issue and paste the additional output. "
-                                "Additional output: %s.",
-                                cls, str(status), str(additional_info),
+                                "Traditional prediction for {} failed with run state {},\n"
+                                "because the provided memory limits were too tight.\n"
+                                "Please increase the 'ml_memory_limit' and try again.\n"
+                                "If you still get the problem, please open an issue\n"
+                                "and paste the additional info.\n"
+                                "Additional info:\n{}".format(cls, str(status), dict_repr(additional_info))
                            )
                        else:
                            self._logger.error(
-                                "Traditional prediction for %s failed with run state %s and additional output: %s.",
-                                cls, str(status), str(additional_info),
+                                "Traditional prediction for {} failed with run state {}.\nAdditional info:\n{}".format(
+                                    cls, str(status), dict_repr(additional_info)
+                                )
                            )
678670
679671 # In the case of a serial execution, calling submit halts the run for a resource
0 commit comments