From 77da44cb4dd9923111042c5ccdd45eae9b811caf Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 22 Jan 2021 15:12:29 +0000 Subject: [PATCH 01/10] Making _segm_resnet() generic and reusable. --- .../models/segmentation/segmentation.py | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py index 158ba5e3d0e..b384fa2e348 100644 --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -16,14 +16,19 @@ } -def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True): - backbone = resnet.__dict__[backbone_name]( - pretrained=pretrained_backbone, - replace_stride_with_dilation=[False, True, True]) - - return_layers = {'layer4': 'out'} +def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True): + if 'resnet' in backbone_name: + backbone = resnet.__dict__[backbone_name]( + pretrained=pretrained_backbone, + replace_stride_with_dilation=[False, True, True]) + out_layer = 'layer4' + aux_layer = 'layer3' + else: + raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name)) + + return_layers = {out_layer: 'out'} if aux: - return_layers['layer3'] = 'aux' + return_layers[aux_layer] = 'aux' backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) aux_classifier = None @@ -46,7 +51,7 @@ def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs): if pretrained: aux_loss = True - model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs) + model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs) if pretrained: arch = arch_type + '_' + backbone + '_coco' model_url = model_urls[arch] From 462d59a904c919935aaba6809c6556737ff1381e Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 22 Jan 2021 17:24:50 +0000 Subject: [PATCH 02/10] Adding fcn and deeplabv3 directly on mobilenetv3 backbone. --- .../models/segmentation/segmentation.py | 63 +++++++++++++++++-- 1 file changed, 58 insertions(+), 5 deletions(-) diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py index b384fa2e348..0418be3a45a 100644 --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -1,18 +1,22 @@ from .._utils import IntermediateLayerGetter from ..utils import load_state_dict_from_url +from .. import mobilenet from .. 
import resnet
 from .deeplabv3 import DeepLabHead, DeepLabV3
 from .fcn import FCN, FCNHead
 
 
-__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101']
+__all__ = ['fcn_resnet50', 'fcn_resnet101', 'fcn_mobilenet_v3_large', 'deeplabv3_resnet50', 'deeplabv3_resnet101',
+           'deeplabv3_mobilenet_v3_large']
 
 
 model_urls = {
     'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',
     'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
+    'fcn_mobilenet_v3_large_coco': None,
     'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
     'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
+    'deeplabv3_mobilenet_v3_large_coco': None,
 }
 
 
@@ -22,7 +26,22 @@ def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True)
             pretrained=pretrained_backbone,
             replace_stride_with_dilation=[False, True, True])
         out_layer = 'layer4'
+        out_inplanes = 2048
         aux_layer = 'layer3'
+        aux_inplanes = 1024
+    elif 'mobilenet' in backbone_name:
+        backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained_backbone).features
+
+        # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
+        # The first and last blocks are always included because they are the C0 (conv1) and Cn.
+        stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "is_strided", False)] + [
+            len(backbone) - 1]
+        out_pos = stage_indices[-1]
+        out_layer = str(out_pos)
+        out_inplanes = backbone[out_pos].out_channels
+        aux_pos = stage_indices[-2]
+        aux_layer = str(aux_pos)
+        aux_inplanes = backbone[aux_pos].out_channels
     else:
         raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name))
 
@@ -33,15 +52,13 @@ def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True)
 
     aux_classifier = None
     if aux:
-        inplanes = 1024
-        aux_classifier = FCNHead(inplanes, num_classes)
+        aux_classifier = FCNHead(aux_inplanes, num_classes)
 
     model_map = {
         'deeplabv3': (DeepLabHead, DeepLabV3),
         'fcn': (FCNHead, FCN),
     }
-    inplanes = 2048
-    classifier = model_map[name][0](inplanes, num_classes)
+    classifier = model_map[name][0](out_inplanes, num_classes)
     base_model = model_map[name][1]
 
     model = base_model(backbone, classifier, aux_classifier)
@@ -71,6 +88,8 @@ def fcn_resnet50(pretrained=False, progress=True,
         pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
             contains the same classes as Pascal VOC
         progress (bool): If True, displays a progress bar of the download to stderr
+        num_classes (int): number of output classes of the model (including the background)
+        aux_loss (bool): If True, it uses an auxiliary loss
     """
     return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
 
@@ -83,10 +102,26 @@ def fcn_resnet101(pretrained=False, progress=True,
         pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
             contains the same classes as Pascal VOC
         progress (bool): If True, displays a progress bar of the download to stderr
+        num_classes (int): number of output classes of the model (including the background)
+        aux_loss (bool): If True, it uses an auxiliary loss
    """
     return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
 
 
+def fcn_mobilenet_v3_large(pretrained=False, progress=True,
+                           num_classes=21, aux_loss=None, **kwargs):
+    """Constructs a Fully-Convolutional Network model
with a MobileNetV3-Large backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss + """ + return _load_model('fcn', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs) + + def deeplabv3_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs): """Constructs a DeepLabV3 model with a ResNet-50 backbone. @@ -95,6 +130,8 @@ def deeplabv3_resnet50(pretrained=False, progress=True, pretrained (bool): If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss """ return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) @@ -107,5 +144,21 @@ def deeplabv3_resnet101(pretrained=False, progress=True, pretrained (bool): If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss """ return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) + + +def deeplabv3_mobilenet_v3_large(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a DeepLabV3 model with a MobileNetV3-Large backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss + """ + return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs) From 482d3bd2758c4ab19105538930718b34b7c142dc Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Mon, 25 Jan 2021 11:12:00 +0000 Subject: [PATCH 03/10] Adding tests for segmentation models. 
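The two MobileNetV3-Large constructors added in the previous commit can be exercised with
a quick smoke test along these lines (an illustrative sketch, not the repository's test
harness; pretrained_backbone=False is passed only to avoid downloading backbone weights):

    import torch
    from torchvision.models.segmentation import deeplabv3_mobilenet_v3_large, fcn_mobilenet_v3_large

    x = torch.rand(1, 3, 224, 224)
    for builder in (fcn_mobilenet_v3_large, deeplabv3_mobilenet_v3_large):
        model = builder(pretrained=False, pretrained_backbone=False, num_classes=21).eval()
        with torch.no_grad():
            out = model(x)['out']  # the segmentation wrappers return a dict; 'out' is the main head
        assert out.shape == (1, 21, 224, 224)  # predictions are upsampled back to the input size

The expect files below store the reference outputs that the checks in test_models.py
compare against.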
---
 ...test_deeplabv3_mobilenet_v3_large_expect.pkl | Bin 0 -> 41785 bytes
 ...ester.test_fcn_mobilenet_v3_large_expect.pkl | Bin 0 -> 41785 bytes
 test/test_models.py                             |  2 ++
 3 files changed, 2 insertions(+)
 create mode 100644 test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl
 create mode 100644 test/expect/ModelTester.test_fcn_mobilenet_v3_large_expect.pkl

diff --git a/test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl b/test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..18a00898b981ac0ba8ff4df99dcb453e95c11311
GIT binary patch
literal 41785
[41785 bytes of base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/test/test_models.py b/test/test_models.py
index 14880425aed..1e6af5568ee 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -61,8 +61,10 @@ def get_available_video_models():
     "wide_resnet101_2",
     "deeplabv3_resnet50",
     "deeplabv3_resnet101",
+    "deeplabv3_mobilenet_v3_large",
     "fcn_resnet50",
     "fcn_resnet101",
+    "fcn_mobilenet_v3_large",
 )

From f64bfed2ed25124640b0b5f32c82cc519de9beeb Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Mon, 25 Jan 2021 12:57:40 +0000
Subject: [PATCH 04/10] Rename is_strided to _is_cn.

---
 torchvision/models/detection/backbone_utils.py  | 2 +-
 torchvision/models/mobilenetv2.py               | 2 +-
 torchvision/models/mobilenetv3.py               | 2 +-
 torchvision/models/segmentation/segmentation.py | 3 +--
 4 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py
index e9a4a7104cf..5c8b6783d83 100644
--- a/torchvision/models/detection/backbone_utils.py
+++ b/torchvision/models/detection/backbone_utils.py
@@ -138,7 +138,7 @@ def mobilenet_backbone(
 
     # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
     # The first and last blocks are always included because they are the C0 (conv1) and Cn.
-    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "is_strided", False)] + [len(backbone) - 1]
+    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
     num_stages = len(stage_indices)
 
     # find the index of the layer from which we won't freeze
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py
index 93474ae7396..cd3a67e1d7c 100644
--- a/torchvision/models/mobilenetv2.py
+++ b/torchvision/models/mobilenetv2.py
@@ -88,7 +88,7 @@ def __init__(
         ])
         self.conv = nn.Sequential(*layers)
         self.out_channels = oup
-        self.is_strided = stride > 1
+        self._is_cn = stride > 1
 
     def forward(self, x: Tensor) -> Tensor:
         if self.use_res_connect:
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index eba4823277b..b1f14128b81 100644
--- a/torchvision/models/mobilenetv3.py
+++ b/torchvision/models/mobilenetv3.py
@@ -82,7 +82,7 @@ def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Mod
 
         self.block = nn.Sequential(*layers)
         self.out_channels = cnf.out_channels
-        self.is_strided = cnf.stride > 1
+        self._is_cn = cnf.stride > 1
 
     def forward(self, input: Tensor) -> Tensor:
         result = self.block(input)
diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py
index 0418be3a45a..9bb63e1b085 100644
--- a/torchvision/models/segmentation/segmentation.py
+++ b/torchvision/models/segmentation/segmentation.py
@@ -34,8 +34,7 @@ def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True)
 
     # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
     # The first and last blocks are always included because they are the C0 (conv1) and Cn.
-    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "is_strided", False)] + [
-        len(backbone) - 1]
+    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]

From 10a51cf4ea39c239c40529b151175c7276034162 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Mon, 25 Jan 2021 15:22:14 +0000
Subject: [PATCH 05/10] Add dilation support on MobileNetV3 for Segmentation.
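When the new private _dilated flag is set, the stride-2 C4 block and the blocks that
follow it switch to dilated convolutions (the stride is forced back to 1 whenever
dilation > 1), so the feature extractor keeps an output stride of 16 instead of 32.
A rough sanity check of the effect (illustrative sketch; _dilated and _reduced_tail
are the non-public keyword arguments introduced by this patch):

    import torch
    from torchvision.models import mobilenet_v3_large

    x = torch.rand(1, 3, 224, 224)
    plain = mobilenet_v3_large(pretrained=False).features
    dilated = mobilenet_v3_large(pretrained=False, _dilated=True).features
    with torch.no_grad():
        print(plain(x).shape)    # torch.Size([1, 960, 7, 7])   -> output stride 32
        print(dilated(x).shape)  # torch.Size([1, 960, 14, 14]) -> output stride 16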
---
 ...st_deeplabv3_mobilenet_v3_large_expect.pkl | Bin 41785 -> 41785 bytes
 ...ter.test_fcn_mobilenet_v3_large_expect.pkl | Bin 41785 -> 41785 bytes
 torchvision/models/mobilenetv2.py             |  6 +-
 torchvision/models/mobilenetv3.py             | 84 +++++++++---------
 .../models/segmentation/segmentation.py       | 10 +--
 5 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl b/test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl
index 18a00898b981ac0ba8ff4df99dcb453e95c11311..58d6da6c7210da8850ae5f6e4278653b38e35159 100644
GIT binary patch
[41785 bytes of base85-encoded binary data omitted]

diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py
index cd3a67e1d7c..c4e83fa364f 100644
--- a/torchvision/models/mobilenetv2.py
+++ b/torchvision/models/mobilenetv2.py
@@ -38,14 +38,16 @@ def __init__(
         groups: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
         activation_layer: Optional[Callable[..., nn.Module]] = None,
+        dilation: int = 1,
     ) -> None:
-        padding = (kernel_size - 1) // 2
+        padding = (kernel_size - 1) // 2 * dilation
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         if activation_layer is None:
             activation_layer = nn.ReLU6
         super(ConvBNReLU, self).__init__(
-            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
+            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, dilation=dilation, groups=groups,
+                      bias=False),
             norm_layer(out_planes),
             activation_layer(inplace=True)
         )
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index
b1f14128b81..a7d45264dc5 100644 --- a/torchvision/models/mobilenetv3.py +++ b/torchvision/models/mobilenetv3.py @@ -38,7 +38,7 @@ def forward(self, input: Tensor) -> Tensor: class InvertedResidualConfig: def __init__(self, input_channels: int, kernel: int, expanded_channels: int, out_channels: int, use_se: bool, - activation: str, stride: int, width_mult: float): + activation: str, stride: int, dilation: int, width_mult: float): self.input_channels = self.adjust_channels(input_channels, width_mult) self.kernel = kernel self.expanded_channels = self.adjust_channels(expanded_channels, width_mult) @@ -46,6 +46,7 @@ def __init__(self, input_channels: int, kernel: int, expanded_channels: int, out self.use_se = use_se self.use_hs = activation == "HS" self.stride = stride + self.dilation = dilation @staticmethod def adjust_channels(channels: int, width_mult: float): @@ -70,9 +71,10 @@ def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Mod norm_layer=norm_layer, activation_layer=activation_layer)) # depthwise + stride = 1 if cnf.dilation > 1 else cnf.stride layers.append(ConvBNActivation(cnf.expanded_channels, cnf.expanded_channels, kernel_size=cnf.kernel, - stride=cnf.stride, groups=cnf.expanded_channels, norm_layer=norm_layer, - activation_layer=activation_layer)) + stride=stride, dilation=cnf.dilation, groups=cnf.expanded_channels, + norm_layer=norm_layer, activation_layer=activation_layer)) if cnf.use_se: layers.append(SqueezeExcitation(cnf.expanded_channels)) @@ -194,8 +196,7 @@ def _mobilenet_v3( return model -def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, reduced_tail: bool = False, - **kwargs: Any) -> MobileNetV3: +def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3: """ Constructs a large MobileNetV3 architecture from `"Searching for MobileNetV3" `_. @@ -203,40 +204,38 @@ def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, reduced_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr - reduced_tail (bool): If True, reduces the channel counts of all feature layers - between C4 and C5 by 2. It is used to reduce the channel redundancy in the - backbone for Detection and Segmentation. 
""" + # non-public config parameters + reduce_divider = 2 if kwargs.pop('_reduced_tail', False) else 1 + dilation = 2 if kwargs.pop('_dilated', False) else 1 width_mult = 1.0 + bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult) adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult) - reduce_divider = 2 if reduced_tail else 1 - inverted_residual_setting = [ - bneck_conf(16, 3, 16, 16, False, "RE", 1), - bneck_conf(16, 3, 64, 24, False, "RE", 2), # C1 - bneck_conf(24, 3, 72, 24, False, "RE", 1), - bneck_conf(24, 5, 72, 40, True, "RE", 2), # C2 - bneck_conf(40, 5, 120, 40, True, "RE", 1), - bneck_conf(40, 5, 120, 40, True, "RE", 1), - bneck_conf(40, 3, 240, 80, False, "HS", 2), # C3 - bneck_conf(80, 3, 200, 80, False, "HS", 1), - bneck_conf(80, 3, 184, 80, False, "HS", 1), - bneck_conf(80, 3, 184, 80, False, "HS", 1), - bneck_conf(80, 3, 480, 112, True, "HS", 1), - bneck_conf(112, 3, 672, 112, True, "HS", 1), - bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2), # C4 - bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1), - bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1), + bneck_conf(16, 3, 16, 16, False, "RE", 1, 1), + bneck_conf(16, 3, 64, 24, False, "RE", 2, 1), # C1 + bneck_conf(24, 3, 72, 24, False, "RE", 1, 1), + bneck_conf(24, 5, 72, 40, True, "RE", 2, 1), # C2 + bneck_conf(40, 5, 120, 40, True, "RE", 1, 1), + bneck_conf(40, 5, 120, 40, True, "RE", 1, 1), + bneck_conf(40, 3, 240, 80, False, "HS", 2, 1), # C3 + bneck_conf(80, 3, 200, 80, False, "HS", 1, 1), + bneck_conf(80, 3, 184, 80, False, "HS", 1, 1), + bneck_conf(80, 3, 184, 80, False, "HS", 1, 1), + bneck_conf(80, 3, 480, 112, True, "HS", 1, 1), + bneck_conf(112, 3, 672, 112, True, "HS", 1, 1), + bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation), # C4 + bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation), + bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation), ] last_channel = adjust_channels(1280 // reduce_divider) # C5 return _mobilenet_v3("mobilenet_v3_large", inverted_residual_setting, last_channel, pretrained, progress, **kwargs) -def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, reduced_tail: bool = False, - **kwargs: Any) -> MobileNetV3: +def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3: """ Constructs a small MobileNetV3 architecture from `"Searching for MobileNetV3" `_. @@ -244,28 +243,27 @@ def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, reduced_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr - reduced_tail (bool): If True, reduces the channel counts of all feature layers - between C4 and C5 by 2. It is used to reduce the channel redundancy in the - backbone for Detection and Segmentation. 
""" + # non-public config parameters + reduce_divider = 2 if kwargs.pop('_reduced_tail', False) else 1 + dilation = 2 if kwargs.pop('_dilated', False) else 1 width_mult = 1.0 + bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult) adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult) - reduce_divider = 2 if reduced_tail else 1 - inverted_residual_setting = [ - bneck_conf(16, 3, 16, 16, True, "RE", 2), # C1 - bneck_conf(16, 3, 72, 24, False, "RE", 2), # C2 - bneck_conf(24, 3, 88, 24, False, "RE", 1), - bneck_conf(24, 5, 96, 40, True, "HS", 2), # C3 - bneck_conf(40, 5, 240, 40, True, "HS", 1), - bneck_conf(40, 5, 240, 40, True, "HS", 1), - bneck_conf(40, 5, 120, 48, True, "HS", 1), - bneck_conf(48, 5, 144, 48, True, "HS", 1), - bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2), # C4 - bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1), - bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1), + bneck_conf(16, 3, 16, 16, True, "RE", 2, 1), # C1 + bneck_conf(16, 3, 72, 24, False, "RE", 2, 1), # C2 + bneck_conf(24, 3, 88, 24, False, "RE", 1, 1), + bneck_conf(24, 5, 96, 40, True, "HS", 2, 1), # C3 + bneck_conf(40, 5, 240, 40, True, "HS", 1, 1), + bneck_conf(40, 5, 240, 40, True, "HS", 1, 1), + bneck_conf(40, 5, 120, 48, True, "HS", 1, 1), + bneck_conf(48, 5, 144, 48, True, "HS", 1, 1), + bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation), # C4 + bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation), + bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation), ] last_channel = adjust_channels(1024 // reduce_divider) # C5 diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py index 9bb63e1b085..68c298e683a 100644 --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -1,6 +1,6 @@ from .._utils import IntermediateLayerGetter from ..utils import load_state_dict_from_url -from .. import mobilenet +from .. import mobilenetv3 from .. import resnet from .deeplabv3 import DeepLabHead, DeepLabV3 from .fcn import FCN, FCNHead @@ -29,16 +29,16 @@ def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True) out_inplanes = 2048 aux_layer = 'layer3' aux_inplanes = 1024 - elif 'mobilenet' in backbone_name: - backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained_backbone).features + elif 'mobilenet_v3' in backbone_name: + backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features # Gather the indeces of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. # The first and last blocks are always included because they are the C0 (conv1) and Cn. 
         stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
-        out_pos = stage_indices[-1]
+        out_pos = stage_indices[-1]  # use C5 which has output_stride = 16
         out_layer = str(out_pos)
         out_inplanes = backbone[out_pos].out_channels
-        aux_pos = stage_indices[-2]
+        aux_pos = stage_indices[-4]  # use C2 here which has output_stride = 8
         aux_layer = str(aux_pos)
         aux_inplanes = backbone[aux_pos].out_channels
     else:

From 359d941bff241a7fd8428ccacb946cf97cc9727b Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Mon, 25 Jan 2021 21:19:05 +0000
Subject: [PATCH 06/10] Add Lite R-ASPP with MobileNetV3 backbone.

---
 ....test_lraspp_mobilenet_v3_large_expect.pkl | Bin 0 -> 41785 bytes
 test/test_models.py                           |  1 +
 torchvision/models/segmentation/_utils.py     |  1 -
 torchvision/models/segmentation/lraspp.py     | 67 ++++++++++++++++++
 .../models/segmentation/segmentation.py       | 60 +++++++++++++---
 5 files changed, 120 insertions(+), 9 deletions(-)
 create mode 100644 test/expect/ModelTester.test_lraspp_mobilenet_v3_large_expect.pkl
 create mode 100644 torchvision/models/segmentation/lraspp.py

diff --git a/test/expect/ModelTester.test_lraspp_mobilenet_v3_large_expect.pkl b/test/expect/ModelTester.test_lraspp_mobilenet_v3_large_expect.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..b2aa2ca89a948424ef80b032ce84af68cb109cfd
GIT binary patch
literal 41785
[41785 bytes of base85-encoded binary data omitted]

[the hunks for test/test_models.py and _utils.py and the opening of the new lraspp.py are elided here; the forward pass of the new head continues below]

+    def forward(self, input: Dict[str, Tensor]) -> Tensor:
+        low = input["low"]
+        high = input["high"]
+
+        x = self.cbr(high)
+        s = self.scale(high)
+        x = x * s
+        x = F.interpolate(x, size=low.shape[-2:], mode='bilinear', align_corners=False)
+
+        return self.low_classifier(low) + self.high_classifier(x)
diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py
index 68c298e683a..b3c584d7823 100644
--- a/torchvision/models/segmentation/segmentation.py
+++ b/torchvision/models/segmentation/segmentation.py
@@ -4,10 +4,11 @@
 from .. import resnet
 from .deeplabv3 import DeepLabHead, DeepLabV3
 from .fcn import FCN, FCNHead
+from .lraspp import LRASPP
 
 
 __all__ = ['fcn_resnet50', 'fcn_resnet101', 'fcn_mobilenet_v3_large', 'deeplabv3_resnet50', 'deeplabv3_resnet101',
-           'deeplabv3_mobilenet_v3_large']
+           'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']
 
 
 model_urls = {
@@ -17,6 +18,7 @@
     'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
     'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
     'deeplabv3_mobilenet_v3_large_coco': None,
+    'lraspp_mobilenet_v3_large_coco': None,
 }
 
 
@@ -69,13 +71,34 @@ def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss
         aux_loss = True
     model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)
     if pretrained:
-        arch = arch_type + '_' + backbone + '_coco'
-        model_url = model_urls[arch]
-        if model_url is None:
-            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
-        else:
-            state_dict = load_state_dict_from_url(model_url, progress=progress)
-            model.load_state_dict(state_dict)
+        _load_weights(model, arch_type, backbone, progress)
+    return model
+
+
+def _load_weights(model, arch_type, backbone, progress):
+    arch = arch_type + '_' + backbone + '_coco'
+    model_url = model_urls[arch]
+    if model_url is None:
+        raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
+    else:
+        state_dict = load_state_dict_from_url(model_url, progress=progress)
+        model.load_state_dict(state_dict)
+
+
+def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):
+    backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features
+
+    # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
+    # The first and last blocks are always included because they are the C0 (conv1) and Cn.
+    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
+    low_pos = stage_indices[-4]  # use C2 here which has output_stride = 8
+    high_pos = stage_indices[-1]  # use C5 which has output_stride = 16
+    low_channels = backbone[low_pos].out_channels
+    high_channels = backbone[high_pos].out_channels
+
+    backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})
+
+    model = LRASPP(backbone, low_channels, high_channels, num_classes)
     return model
 
 
@@ -161,3 +184,24 @@ def deeplabv3_mobilenet_v3_large(pretrained=False, progress=True,
         aux_loss (bool): If True, it uses an auxiliary loss
     """
     return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)
+
+
+def lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs):
+    """Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.
+ + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + """ + if kwargs.pop("aux_loss", False): + raise NotImplementedError('This model does not use auxiliary loss') + + backbone_name = 'mobilenet_v3_large' + model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs) + + if pretrained: + _load_weights(model, 'lraspp', backbone_name, progress) + + return model From 406fa470a884f8ae3cb7bd7d5aceff491ebc97b7 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Wed, 27 Jan 2021 12:07:50 +0000 Subject: [PATCH 07/10] Add pretrained model weights. --- torchvision/models/segmentation/segmentation.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py index b3c584d7823..53e436faa00 100644 --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -14,11 +14,12 @@ model_urls = { 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth', 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth', - 'fcn_mobilenet_v3_large_coco': None, + 'fcn_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/fcn_mobilenet_v3_large-7191edb4.pth', 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth', 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth', - 'deeplabv3_mobilenet_v3_large_coco': None, - 'lraspp_mobilenet_v3_large_coco': None, + 'deeplabv3_mobilenet_v3_large_coco': + 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth', + 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth', } @@ -77,7 +78,7 @@ def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss def _load_weights(model, arch_type, backbone, progress): arch = arch_type + '_' + backbone + '_coco' - model_url = model_urls[arch] + model_url = model_urls.get(arch, None) if model_url is None: raise NotImplementedError('pretrained {} is not supported as of now'.format(arch)) else: From 1641d5f4c7d41f534444fab340c598d61a91bd12 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Wed, 27 Jan 2021 12:19:53 +0000 Subject: [PATCH 08/10] Removing model fcn_mobilenet_v3_large. 
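
This drops the FCN + MobileNetV3-Large pairing from the public API, the
pretrained URL table and the tests; DeepLabV3 and LR-ASPP remain the
supported heads for the MobileNetV3-Large backbone. A quick smoke test of
the surviving builders (a minimal sketch, not part of the patch itself; it
assumes the new builders follow the same OrderedDict output convention as
the ResNet-based models, with the scores under the 'out' key):

```
import torch
from torchvision.models.segmentation import (
    deeplabv3_mobilenet_v3_large,
    lraspp_mobilenet_v3_large,
)

# pretrained_backbone=False avoids any weight download in a smoke test.
for builder in (deeplabv3_mobilenet_v3_large, lraspp_mobilenet_v3_large):
    model = builder(num_classes=21, pretrained_backbone=False).eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 224, 224))
    # Scores are upsampled back to the input size: torch.Size([1, 21, 224, 224]).
    print(builder.__name__, out["out"].shape)
```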
---
 ...ster.test_fcn_mobilenet_v3_large_expect.pkl | Bin 41785 -> 0 bytes
 test/test_models.py                            |   1 -
 .../models/segmentation/segmentation.py        |  17 +----------------
 3 files changed, 1 insertion(+), 17 deletions(-)
 delete mode 100644 test/expect/ModelTester.test_fcn_mobilenet_v3_large_expect.pkl

diff --git a/test/expect/ModelTester.test_fcn_mobilenet_v3_large_expect.pkl b/test/expect/ModelTester.test_fcn_mobilenet_v3_large_expect.pkl
deleted file mode 100644
index c12de9b825c735519775d4b5891ed8dbe24d1e07..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 41785 base85 data elided]

diff --git a/test/test_models.py b/test/test_models.py
index 02c712c6ef4..9b26839fa0b 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -64,7 +64,6 @@ def get_available_video_models():
     "deeplabv3_mobilenet_v3_large",
     "fcn_resnet50",
     "fcn_resnet101",
-    "fcn_mobilenet_v3_large",
     "lraspp_mobilenet_v3_large",
 )
diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py
index 53e436faa00..4a41e899ea4 100644
--- a/torchvision/models/segmentation/segmentation.py
+++ b/torchvision/models/segmentation/segmentation.py
@@ -7,14 +7,13 @@
 from .lraspp import LRASPP
 
 
-__all__ = ['fcn_resnet50', 'fcn_resnet101', 'fcn_mobilenet_v3_large', 'deeplabv3_resnet50', 'deeplabv3_resnet101',
+__all__ = 
['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101',
            'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']
 
 
 model_urls = {
     'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',
     'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
-    'fcn_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/fcn_mobilenet_v3_large-7191edb4.pth',
     'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
     'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
     'deeplabv3_mobilenet_v3_large_coco':
@@ -131,20 +130,6 @@ def fcn_resnet101(pretrained=False, progress=True,
     return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
 
 
-def fcn_mobilenet_v3_large(pretrained=False, progress=True,
-                           num_classes=21, aux_loss=None, **kwargs):
-    """Constructs a Fully-Convolutional Network model with a MobileNetV3-Large backbone.
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
-            contains the same classes as Pascal VOC
-        progress (bool): If True, displays a progress bar of the download to stderr
-        num_classes (int): number of output classes of the model (including the background)
-        aux_loss (bool): If True, it uses an auxiliary loss
-    """
-    return _load_model('fcn', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)
-
-
 def deeplabv3_resnet50(pretrained=False, progress=True,
                        num_classes=21, aux_loss=None, **kwargs):
     """Constructs a DeepLabV3 model with a ResNet-50 backbone.

From 75170d5bf8df707d2f3a7a2943c1489f1250b033 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 27 Jan 2021 12:37:17 +0000
Subject: [PATCH 09/10] Adding docs and imports.

---
 docs/source/models.rst                      | 12 +++++++++++-
 hubconf.py                                  |  2 +-
 references/segmentation/README.md           | 10 ++++++++++
 torchvision/models/segmentation/__init__.py |  1 +
 torchvision/models/segmentation/lraspp.py   |  4 +++-
 5 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/docs/source/models.rst b/docs/source/models.rst
index 7fbae2a55d1..f4188a5ad1f 100644
--- a/docs/source/models.rst
+++ b/docs/source/models.rst
@@ -271,7 +271,8 @@ The models subpackage contains definitions for the following model
 architectures for semantic segmentation:
 
 - `FCN ResNet50, ResNet101 <https://arxiv.org/abs/1411.4038>`_
-- `DeepLabV3 ResNet50, ResNet101 <https://arxiv.org/abs/1706.05587>`_
+- `DeepLabV3 ResNet50, ResNet101, MobileNetV3-Large <https://arxiv.org/abs/1706.05587>`_
+- `LR-ASPP MobileNetV3-Large <https://arxiv.org/abs/1905.02244>`_
 
 As with image classification models, all pre-trained models expect input images
 normalized in the same way. The images have to be loaded in to a range of ``[0, 1]`` and then normalized using
@@ -298,6 +299,8 @@ FCN ResNet50                     60.5          91.4
 FCN ResNet101                    63.7          91.9
 DeepLabV3 ResNet50               66.4          92.4
 DeepLabV3 ResNet101              67.4          92.4
+DeepLabV3 MobileNetV3-Large      60.3          91.2
+LR-ASPP MobileNetV3-Large        57.9          91.2
 ================================ ============= ====================
 
 
@@ -313,6 +316,13 @@ DeepLabV3
 
 .. autofunction:: torchvision.models.segmentation.deeplabv3_resnet50
 .. autofunction:: torchvision.models.segmentation.deeplabv3_resnet101
+.. autofunction:: torchvision.models.segmentation.deeplabv3_mobilenet_v3_large
+
+
+LR-ASPP
+-------
+
+.. 
autofunction:: torchvision.models.segmentation.lraspp_mobilenet_v3_large
 
 
 Object Detection, Instance Segmentation and Person Keypoint Detection
diff --git a/hubconf.py b/hubconf.py
index dec4a7fb196..097759bdd89 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -18,4 +18,4 @@
 
 # segmentation
 from torchvision.models.segmentation import fcn_resnet50, fcn_resnet101, \
-    deeplabv3_resnet50, deeplabv3_resnet101
+    deeplabv3_resnet50, deeplabv3_resnet101, deeplabv3_mobilenet_v3_large, lraspp_mobilenet_v3_large
diff --git a/references/segmentation/README.md b/references/segmentation/README.md
index 34db88c7a3a..a4b570dd23e 100644
--- a/references/segmentation/README.md
+++ b/references/segmentation/README.md
@@ -31,3 +31,13 @@ python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.
 ```
 python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.02 --dataset coco -b 4 --model deeplabv3_resnet101 --aux-loss
 ```
+
+## deeplabv3_mobilenet_v3_large
+```
+sbatch launch_job.sh --dataset coco -b 4 --model deeplabv3_mobilenet_v3_large --aux-loss --wd 0.000001
+```
+
+## lraspp_mobilenet_v3_large
+```
+sbatch launch_job.sh --dataset coco -b 4 --model lraspp_mobilenet_v3_large --wd 0.000001
+```
diff --git a/torchvision/models/segmentation/__init__.py b/torchvision/models/segmentation/__init__.py
index 43c80c355ad..fb6633d7fb5 100644
--- a/torchvision/models/segmentation/__init__.py
+++ b/torchvision/models/segmentation/__init__.py
@@ -1,3 +1,4 @@
 from .segmentation import *
 from .fcn import *
 from .deeplabv3 import *
+from .lraspp import *
diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py
index 37fd8afeb58..44cd9b1e773 100644
--- a/torchvision/models/segmentation/lraspp.py
+++ b/torchvision/models/segmentation/lraspp.py
@@ -10,7 +10,9 @@
 
 class LRASPP(nn.Module):
     """
-    Implements a Lite R-ASPP Network for semantic segmentation.
+    Implements a Lite R-ASPP Network for semantic segmentation from
+    `"Searching for MobileNetV3"
+    <https://arxiv.org/abs/1905.02244>`_.
 
     Args:
         backbone (nn.Module): the network used to compute the features for the model.

From 76d537b673b745a6b315bed244ff17e779c7e634 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 27 Jan 2021 13:36:58 +0000
Subject: [PATCH 10/10] Fixing typo and readme.

---
 references/segmentation/README.md               | 4 ++--
 torchvision/models/detection/backbone_utils.py  | 2 +-
 torchvision/models/segmentation/segmentation.py | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/references/segmentation/README.md b/references/segmentation/README.md
index a4b570dd23e..6e24f836624 100644
--- a/references/segmentation/README.md
+++ b/references/segmentation/README.md
@@ -34,10 +34,10 @@ python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.
## deeplabv3_mobilenet_v3_large ``` -sbatch launch_job.sh --dataset coco -b 4 --model deeplabv3_mobilenet_v3_large --aux-loss --wd 0.000001 +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --dataset coco -b 4 --model deeplabv3_mobilenet_v3_large --aux-loss --wd 0.000001 ``` ## lraspp_mobilenet_v3_large ``` -sbatch launch_job.sh --dataset coco -b 4 --model lraspp_mobilenet_v3_large --wd 0.000001 +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --dataset coco -b 4 --model lraspp_mobilenet_v3_large --wd 0.000001 ``` diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py index 5c8b6783d83..45f311d160c 100644 --- a/torchvision/models/detection/backbone_utils.py +++ b/torchvision/models/detection/backbone_utils.py @@ -136,7 +136,7 @@ def mobilenet_backbone( ): backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained, norm_layer=norm_layer).features - # Gather the indeces of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. # The first and last blocks are always included because they are the C0 (conv1) and Cn. stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] num_stages = len(stage_indices) diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py index 4a41e899ea4..371be9b97da 100644 --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -34,7 +34,7 @@ def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True) elif 'mobilenet_v3' in backbone_name: backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features - # Gather the indeces of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. # The first and last blocks are always included because they are the C0 (conv1) and Cn. stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] out_pos = stage_indices[-1] # use C5 which has output_stride = 16 @@ -88,7 +88,7 @@ def _load_weights(model, arch_type, backbone, progress): def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True): backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features - # Gather the indeces of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. # The first and last blocks are always included because they are the C0 (conv1) and Cn. stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] low_pos = stage_indices[-4] # use C2 here which has output_stride = 8
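
With the full series applied, both MobileNetV3-Large models can be used like
the existing ResNet-based ones. A usage sketch (assumptions: the model returns
an OrderedDict with the scores under 'out', as the ResNet-based models do, and
the normalization constants are the standard ImageNet mean/std referenced by
docs/source/models.rst):

```
import torch
from torchvision.models.segmentation import lraspp_mobilenet_v3_large

model = lraspp_mobilenet_v3_large(pretrained=True).eval()

# Stand-in batch already scaled to [0, 1]; real images get the same scaling
# followed by this normalization.
x = torch.rand(1, 3, 520, 520)
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
x = (x - mean) / std

with torch.no_grad():
    out = model(x)["out"]  # (1, 21, H, W) class scores
pred = out.argmax(1)       # (1, H, W) predicted class index per pixel
```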