@@ -68,7 +68,7 @@ def __init__(
6868                in_channels = input_channels ,
6969                out_channels = conv3d_channels ,
7070                kernel_size = (kernel_size , kernel_size , kernel_size ),
71-                 padding = (1 ,0 , 0 ),
71+                 padding = (1 ,  0 ,  0 ),
7272            )
7373        )
7474        for  i  in  range (0 , num_layers ):
@@ -77,7 +77,7 @@ def __init__(
7777                    in_channels = conv3d_channels ,
7878                    out_channels = conv3d_channels ,
7979                    kernel_size = (kernel_size , kernel_size , kernel_size ),
80-                     padding = (1 ,0 , 0 ),
80+                     padding = (1 ,  0 ,  0 ),
8181                )
8282            )
8383
@@ -95,9 +95,7 @@ def __init__(
9595        # Small head model to convert from latent space to PV generation for training 
9696        # Input is per-pixel input data, this will be 
9797        # reshaped to the same output steps as the latent head 
98-         self .pv_meta_input  =  nn .Linear (
99-             pv_meta_input_channels , out_features = hidden_dim 
100-         )
98+         self .pv_meta_input  =  nn .Linear (pv_meta_input_channels , out_features = hidden_dim )
10199
102100        # Output is forecast steps channels, each channel is a timestep 
103101        # For labelling, this should be 1, forecasting the middle 
@@ -142,7 +140,5 @@ def forward(self, x: torch.Tensor, pv_meta: torch.Tensor = None, output_latents:
142140        x  =  torch .cat ([x , pv_meta ], dim = 1 )
143141        # Get pv_meta_output 
144142        x  =  self .pv_meta_output (x )
145-         x  =  F .relu (
146-             self .pv_meta_output2 (x )
147-         )  # Generation can only be positive or 0, so ReLU 
143+         x  =  F .relu (self .pv_meta_output2 (x ))  # Generation can only be positive or 0, so ReLU 
148144        return  x 