@@ -36,7 +36,7 @@ with AutoPyTorch
3636
3737 .. code-block:: none
3838
39- <smac.runhistory.runhistory.RunHistory object at 0x7fdfa8dea670> [TrajEntry(train_perf=2147483648, incumbent_id=1, incumbent=Configuration:
39+ <smac.runhistory.runhistory.RunHistory object at 0x7f27c9064400> [TrajEntry(train_perf=2147483648, incumbent_id=1, incumbent=Configuration:
4040 data_loader:batch_size, Value: 64
4141 encoder:__choice__, Value: 'OneHotEncoder'
4242 feature_preprocessor:__choice__, Value: 'NoFeaturePreprocessor'
@@ -67,7 +67,7 @@ with AutoPyTorch
6767 optimizer:__choice__, Value: 'AdamOptimizer'
6868 scaler:__choice__, Value: 'StandardScaler'
6969 trainer:__choice__, Value: 'StandardTrainer'
70- , ta_runs=0, ta_time_used=0.0, wallclock_time=0.0019443035125732422, budget=0), TrajEntry(train_perf=0.8271799717423096, incumbent_id=1, incumbent=Configuration:
70+ , ta_runs=0, ta_time_used=0.0, wallclock_time=0.002293109893798828, budget=0), TrajEntry(train_perf=0.7575204056297503, incumbent_id=1, incumbent=Configuration:
7171 data_loader:batch_size, Value: 64
7272 encoder:__choice__, Value: 'OneHotEncoder'
7373 feature_preprocessor:__choice__, Value: 'NoFeaturePreprocessor'
@@ -98,73 +98,11 @@ with AutoPyTorch
9898 optimizer:__choice__, Value: 'AdamOptimizer'
9999 scaler:__choice__, Value: 'StandardScaler'
100100 trainer:__choice__, Value: 'StandardTrainer'
101- , ta_runs=1, ta_time_used=4.774270296096802, wallclock_time=8.881023645401001, budget=5.555555555555555), TrajEntry(train_perf=0.5705056242970682, incumbent_id=2, incumbent=Configuration:
102- data_loader:batch_size, Value: 220
103- encoder:__choice__, Value: 'OneHotEncoder'
104- feature_preprocessor:PowerTransformer:standardize, Value: True
105- feature_preprocessor:__choice__, Value: 'PowerTransformer'
106- imputer:categorical_strategy, Value: 'constant_!missing!'
107- imputer:numerical_strategy, Value: 'median'
108- lr_scheduler:ExponentialLR:gamma, Value: 0.7297909296891054
109- lr_scheduler:__choice__, Value: 'ExponentialLR'
110- network_backbone:MLPBackbone:activation, Value: 'sigmoid'
111- network_backbone:MLPBackbone:num_groups, Value: 1
112- network_backbone:MLPBackbone:num_units_1, Value: 53
113- network_backbone:MLPBackbone:use_dropout, Value: False
114- network_backbone:__choice__, Value: 'MLPBackbone'
115- network_embedding:LearnedEntityEmbedding:dimension_reduction_0, Value: 0.49162398882471625
116- network_embedding:LearnedEntityEmbedding:dimension_reduction_1, Value: 0.9738223543779865
117- network_embedding:LearnedEntityEmbedding:min_unique_values_for_embedding, Value: 3
118- network_embedding:__choice__, Value: 'LearnedEntityEmbedding'
119- network_head:__choice__, Value: 'fully_connected'
120- network_head:fully_connected:activation, Value: 'tanh'
121- network_head:fully_connected:num_layers, Value: 2
122- network_head:fully_connected:units_layer_1, Value: 84
123- network_init:XavierInit:bias_strategy, Value: 'Normal'
124- network_init:__choice__, Value: 'XavierInit'
125- optimizer:SGDOptimizer:lr, Value: 0.07384606967030707
126- optimizer:SGDOptimizer:momentum, Value: 0.41566496944512654
127- optimizer:SGDOptimizer:weight_decay, Value: 0.002766441795903385
128- optimizer:__choice__, Value: 'SGDOptimizer'
129- scaler:__choice__, Value: 'MinMaxScaler'
130- trainer:__choice__, Value: 'StandardTrainer'
131- , ta_runs=2, ta_time_used=9.929801225662231, wallclock_time=15.717073202133179, budget=5.555555555555555), TrajEntry(train_perf=0.21935886392409365, incumbent_id=3, incumbent=Configuration:
132- data_loader:batch_size, Value: 64
133- encoder:__choice__, Value: 'OneHotEncoder'
134- feature_preprocessor:__choice__, Value: 'NoFeaturePreprocessor'
135- imputer:categorical_strategy, Value: 'most_frequent'
136- imputer:numerical_strategy, Value: 'mean'
137- lr_scheduler:ReduceLROnPlateau:factor, Value: 0.1
138- lr_scheduler:ReduceLROnPlateau:mode, Value: 'min'
139- lr_scheduler:ReduceLROnPlateau:patience, Value: 10
140- lr_scheduler:__choice__, Value: 'ReduceLROnPlateau'
141- network_backbone:ShapedMLPBackbone:activation, Value: 'relu'
142- network_backbone:ShapedMLPBackbone:max_units, Value: 200
143- network_backbone:ShapedMLPBackbone:mlp_shape, Value: 'funnel'
144- network_backbone:ShapedMLPBackbone:num_groups, Value: 5
145- network_backbone:ShapedMLPBackbone:output_dim, Value: 200
146- network_backbone:ShapedMLPBackbone:use_dropout, Value: False
147- network_backbone:__choice__, Value: 'ShapedMLPBackbone'
148- network_embedding:__choice__, Value: 'NoEmbedding'
149- network_head:__choice__, Value: 'fully_connected'
150- network_head:fully_connected:activation, Value: 'relu'
151- network_head:fully_connected:num_layers, Value: 2
152- network_head:fully_connected:units_layer_1, Value: 128
153- network_init:XavierInit:bias_strategy, Value: 'Normal'
154- network_init:__choice__, Value: 'XavierInit'
155- optimizer:AdamOptimizer:beta1, Value: 0.9
156- optimizer:AdamOptimizer:beta2, Value: 0.9
157- optimizer:AdamOptimizer:lr, Value: 0.01
158- optimizer:AdamOptimizer:weight_decay, Value: 0.0
159- optimizer:__choice__, Value: 'AdamOptimizer'
160- scaler:__choice__, Value: 'StandardScaler'
161- trainer:__choice__, Value: 'StandardTrainer'
162- , ta_runs=11, ta_time_used=64.2962703704834, wallclock_time=93.74913358688354, budget=16.666666666666664)]
163- {'r2': 0.9189154709278342}
101+ , ta_runs=1, ta_time_used=4.336726665496826, wallclock_time=8.26810622215271, budget=5.555555555555555)]
102+ {'r2': 0.9135738588575801}
164103 | | Preprocessing | Estimator | Weight |
165104 |---:|:------------------------------------------------------------------|:----------------------------------------------------------------|---------:|
166- | 0 | SimpleImputer,OneHotEncoder,StandardScaler,NoFeaturePreprocessing | no embedding,ShapedMLPBackbone,FullyConnectedHead,nn.Sequential | 0.64 |
167- | 1 | SimpleImputer,OneHotEncoder,StandardScaler,NoFeaturePreprocessing | no embedding,ShapedMLPBackbone,FullyConnectedHead,nn.Sequential | 0.36 |
105+ | 0 | SimpleImputer,OneHotEncoder,StandardScaler,NoFeaturePreprocessing | no embedding,ShapedMLPBackbone,FullyConnectedHead,nn.Sequential | 1 |
168106
169107
170108
@@ -250,7 +188,7 @@ with AutoPyTorch
250188
251189 .. rst-class:: sphx-glr-timing
252190
253- **Total running time of the script:** ( 5 minutes 14.190 seconds)
191+ **Total running time of the script:** ( 5 minutes 15.642 seconds)
254192
255193
256194 .. _sphx_glr_download_examples_20_basics_example_tabular_regression.py:
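
The hunks above are regenerated Sphinx-gallery output for the AutoPyTorch tabular regression example: a fresh run replaces the SMAC ``RunHistory``/trajectory dump, the ``{'r2': ...}`` test score, the ensemble table from ``show_models()``, and the total running time. For readers of the diff, the sketch below shows the kind of API calls that produce this output. It is a minimal reconstruction, not the example script itself: the dataset and the budget values are illustrative assumptions.

.. code-block:: python

    # Minimal sketch of an AutoPyTorch tabular regression run that prints the
    # artefacts seen in the regenerated output above. Dataset choice and time
    # budgets are assumptions, not the example's exact settings.
    import sklearn.datasets
    import sklearn.model_selection

    from autoPyTorch.api.tabular_regression import TabularRegressionTask

    # Any tabular regression dataset works; California housing is a stand-in here.
    X, y = sklearn.datasets.fetch_california_housing(return_X_y=True, as_frame=True)
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
        X, y, random_state=1)

    api = TabularRegressionTask()

    # A wall-clock budget of roughly 300 s matches the ~5 minute total running
    # time reported in the timing line (assumed, not taken from the script).
    api.search(
        X_train=X_train, y_train=y_train,
        X_test=X_test, y_test=y_test,
        optimize_metric='r2',
        total_walltime_limit=300,
        func_eval_time_limit_secs=50,
    )

    y_pred = api.predict(X_test)
    print(api.score(y_pred, y_test))        # e.g. {'r2': 0.91...}
    print(api.run_history, api.trajectory)  # the RunHistory / TrajEntry dump
    print(api.show_models())                # the Preprocessing/Estimator/Weight table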