
Commit 79f6c6d

Rename ConvNormActivation to Conv2dNormActivation
1 parent e13206d commit 79f6c6d

File tree

13 files changed: +67 −57 lines

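For context, Conv2dNormActivation is the torchvision.ops helper being renamed in this commit: an nn.Sequential that chains a Conv2d, an optional normalization layer, and an activation. A minimal usage sketch under the new name (illustrative only, not part of this diff; channel sizes are made up):

    import torch
    from torchvision.ops.misc import Conv2dNormActivation

    # Defaults: norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU;
    # padding is inferred from kernel_size, so spatial dims shrink only via stride.
    block = Conv2dNormActivation(3, 16, kernel_size=3, stride=2)
    out = block(torch.randn(1, 3, 32, 32))  # -> shape (1, 16, 16, 16)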

docs/source/ops.rst

Lines changed: 1 addition & 1 deletion
@@ -45,5 +45,5 @@ Operators
     FeaturePyramidNetwork
     StochasticDepth
     FrozenBatchNorm2d
-    ConvNormActivation
+    Conv2dNormActivation
     SqueezeExcitation

torchvision/models/convnext.py

Lines changed: 2 additions & 2 deletions
@@ -6,7 +6,7 @@
 from torch.nn import functional as F
 
 from .._internally_replaced_utils import load_state_dict_from_url
-from ..ops.misc import ConvNormActivation
+from ..ops.misc import Conv2dNormActivation
 from ..ops.stochastic_depth import StochasticDepth
 from ..utils import _log_api_usage_once
 
@@ -127,7 +127,7 @@ def __init__(
         # Stem
         firstconv_output_channels = block_setting[0].input_channels
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 3,
                 firstconv_output_channels,
                 kernel_size=4,

torchvision/models/detection/ssdlite.py

Lines changed: 5 additions & 5 deletions
@@ -7,7 +7,7 @@
 from torch import nn, Tensor
 
 from ..._internally_replaced_utils import load_state_dict_from_url
-from ...ops.misc import ConvNormActivation
+from ...ops.misc import Conv2dNormActivation
 from ...utils import _log_api_usage_once
 from .. import mobilenet
 from . import _utils as det_utils
@@ -29,7 +29,7 @@ def _prediction_block(
 ) -> nn.Sequential:
     return nn.Sequential(
         # 3x3 depthwise with stride 1 and padding 1
-        ConvNormActivation(
+        Conv2dNormActivation(
             in_channels,
             in_channels,
             kernel_size=kernel_size,
@@ -47,11 +47,11 @@ def _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[...,
     intermediate_channels = out_channels // 2
     return nn.Sequential(
         # 1x1 projection to half output channels
-        ConvNormActivation(
+        Conv2dNormActivation(
             in_channels, intermediate_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation
         ),
         # 3x3 depthwise with stride 2 and padding 1
-        ConvNormActivation(
+        Conv2dNormActivation(
             intermediate_channels,
             intermediate_channels,
             kernel_size=3,
@@ -61,7 +61,7 @@ def _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[...,
             activation_layer=activation,
         ),
         # 1x1 projection to output channels
-        ConvNormActivation(
+        Conv2dNormActivation(
             intermediate_channels, out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation
         ),
     )

torchvision/models/efficientnet.py

Lines changed: 6 additions & 6 deletions
@@ -8,7 +8,7 @@
 from torchvision.ops import StochasticDepth
 
 from .._internally_replaced_utils import load_state_dict_from_url
-from ..ops.misc import ConvNormActivation, SqueezeExcitation
+from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
 from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
@@ -104,7 +104,7 @@ def __init__(
         expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
         if expanded_channels != cnf.input_channels:
             layers.append(
-                ConvNormActivation(
+                Conv2dNormActivation(
                     cnf.input_channels,
                     expanded_channels,
                     kernel_size=1,
@@ -115,7 +115,7 @@ def __init__(
 
         # depthwise
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 expanded_channels,
                 expanded_channels,
                 kernel_size=cnf.kernel,
@@ -132,7 +132,7 @@ def __init__(
 
         # project
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
             )
         )
@@ -193,7 +193,7 @@ def __init__(
         # building first layer
         firstconv_output_channels = inverted_residual_setting[0].input_channels
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
             )
         )
@@ -224,7 +224,7 @@ def __init__(
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = 4 * lastconv_input_channels
        layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
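The EfficientNet hunks above all follow the same MBConv pattern: a 1x1 expand, a depthwise conv, and a 1x1 linear projection. A simplified standalone sketch of that pattern with the renamed block (channel sizes are illustrative, and the squeeze-excitation and residual connection are omitted; this is not torchvision's code):

    import torch
    from torch import nn
    from torchvision.ops.misc import Conv2dNormActivation

    inp, hidden, out = 16, 64, 24
    mbconv_body = nn.Sequential(
        # 1x1 expand
        Conv2dNormActivation(inp, hidden, kernel_size=1, activation_layer=nn.SiLU),
        # depthwise: groups equal to the channel count
        Conv2dNormActivation(hidden, hidden, kernel_size=3, groups=hidden, activation_layer=nn.SiLU),
        # 1x1 project: no activation (linear bottleneck)
        Conv2dNormActivation(hidden, out, kernel_size=1, activation_layer=None),
    )
    y = mbconv_body(torch.randn(1, inp, 56, 56))  # -> (1, out, 56, 56)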

torchvision/models/mobilenetv2.py

Lines changed: 7 additions & 7 deletions
@@ -6,7 +6,7 @@
 from torch import nn
 
 from .._internally_replaced_utils import load_state_dict_from_url
-from ..ops.misc import ConvNormActivation
+from ..ops.misc import Conv2dNormActivation
 from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
@@ -20,11 +20,11 @@
 
 
 # necessary for backwards compatibility
-class _DeprecatedConvBNAct(ConvNormActivation):
+class _DeprecatedConvBNAct(Conv2dNormActivation):
     def __init__(self, *args, **kwargs):
         warnings.warn(
             "The ConvBNReLU/ConvBNActivation classes are deprecated since 0.12 and will be removed in 0.14. "
-            "Use torchvision.ops.misc.ConvNormActivation instead.",
+            "Use torchvision.ops.misc.Conv2dNormActivation instead.",
             FutureWarning,
         )
         if kwargs.get("norm_layer", None) is None:
@@ -56,12 +56,12 @@ def __init__(
         if expand_ratio != 1:
             # pw
             layers.append(
-                ConvNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
+                Conv2dNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
             )
         layers.extend(
             [
                 # dw
-                ConvNormActivation(
+                Conv2dNormActivation(
                     hidden_dim,
                     hidden_dim,
                     stride=stride,
@@ -144,7 +144,7 @@ def __init__(
         input_channel = _make_divisible(input_channel * width_mult, round_nearest)
         self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
         features: List[nn.Module] = [
-            ConvNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
+            Conv2dNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
         ]
         # building inverted residual blocks
         for t, c, n, s in inverted_residual_setting:
@@ -155,7 +155,7 @@ def __init__(
             input_channel = output_channel
         # building last several layers
         features.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
             )
         )
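The _DeprecatedConvBNAct shim above keeps old import paths working while warning users. Assuming the 0.12 backwards-compatibility aliases ConvBNReLU/ConvBNActivation are still exported from this module (an assumption, not shown in the diff), the warning can be observed like this:

    import warnings
    from torchvision.models.mobilenetv2 import ConvBNReLU  # deprecated alias, assumed exported

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        block = ConvBNReLU(3, 16, kernel_size=3)  # still builds a Conv2dNormActivation
    assert any(issubclass(w.category, FutureWarning) for w in caught)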

torchvision/models/mobilenetv3.py

Lines changed: 6 additions & 6 deletions
@@ -6,7 +6,7 @@
 from torch import nn, Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
-from ..ops.misc import ConvNormActivation, SqueezeExcitation as SElayer
+from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
 from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
@@ -83,7 +83,7 @@ def __init__(
         # expand
         if cnf.expanded_channels != cnf.input_channels:
             layers.append(
-                ConvNormActivation(
+                Conv2dNormActivation(
                     cnf.input_channels,
                     cnf.expanded_channels,
                     kernel_size=1,
@@ -95,7 +95,7 @@ def __init__(
         # depthwise
         stride = 1 if cnf.dilation > 1 else cnf.stride
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 cnf.expanded_channels,
                 cnf.expanded_channels,
                 kernel_size=cnf.kernel,
@@ -112,7 +112,7 @@ def __init__(
 
         # project
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
             )
         )
@@ -172,7 +172,7 @@ def __init__(
         # building first layer
         firstconv_output_channels = inverted_residual_setting[0].input_channels
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 3,
                 firstconv_output_channels,
                 kernel_size=3,
@@ -190,7 +190,7 @@ def __init__(
         lastconv_input_channels = inverted_residual_setting[-1].out_channels
         lastconv_output_channels = 6 * lastconv_input_channels
         layers.append(
-            ConvNormActivation(
+            Conv2dNormActivation(
                 lastconv_input_channels,
                 lastconv_output_channels,
                 kernel_size=1,

torchvision/models/optical_flow/raft.py

Lines changed: 15 additions & 15 deletions
@@ -6,7 +6,7 @@
 from torch import Tensor
 from torch.nn.modules.batchnorm import BatchNorm2d
 from torch.nn.modules.instancenorm import InstanceNorm2d
-from torchvision.ops import ConvNormActivation
+from torchvision.ops import Conv2dNormActivation
 
 from ..._internally_replaced_utils import load_state_dict_from_url
 from ...utils import _log_api_usage_once
@@ -38,17 +38,17 @@ def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
         # and frozen for the rest of the training process (i.e. set as eval()). The bias term is thus still useful
         # for the rest of the datasets. Technically, we could remove the bias for other norm layers like Instance norm
         # because these aren't frozen, but we don't bother (also, we wouldn't be able to load the original weights).
-        self.convnormrelu1 = ConvNormActivation(
+        self.convnormrelu1 = Conv2dNormActivation(
             in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
         )
-        self.convnormrelu2 = ConvNormActivation(
+        self.convnormrelu2 = Conv2dNormActivation(
             out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True
         )
 
         if stride == 1:
             self.downsample = nn.Identity()
         else:
-            self.downsample = ConvNormActivation(
+            self.downsample = Conv2dNormActivation(
                 in_channels,
                 out_channels,
                 norm_layer=norm_layer,
@@ -77,21 +77,21 @@ def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
         super().__init__()
 
         # See note in ResidualBlock for the reason behind bias=True
-        self.convnormrelu1 = ConvNormActivation(
+        self.convnormrelu1 = Conv2dNormActivation(
             in_channels, out_channels // 4, norm_layer=norm_layer, kernel_size=1, bias=True
         )
-        self.convnormrelu2 = ConvNormActivation(
+        self.convnormrelu2 = Conv2dNormActivation(
             out_channels // 4, out_channels // 4, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
         )
-        self.convnormrelu3 = ConvNormActivation(
+        self.convnormrelu3 = Conv2dNormActivation(
             out_channels // 4, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True
         )
         self.relu = nn.ReLU(inplace=True)
 
         if stride == 1:
             self.downsample = nn.Identity()
         else:
-            self.downsample = ConvNormActivation(
+            self.downsample = Conv2dNormActivation(
                 in_channels,
                 out_channels,
                 norm_layer=norm_layer,
@@ -124,7 +124,7 @@ def __init__(self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), norm_l
         assert len(layers) == 5
 
         # See note in ResidualBlock for the reason behind bias=True
-        self.convnormrelu = ConvNormActivation(3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=2, bias=True)
+        self.convnormrelu = Conv2dNormActivation(3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=2, bias=True)
 
         self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=1)
         self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=2)
@@ -170,17 +170,17 @@ def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128
         assert len(flow_layers) == 2
         assert len(corr_layers) in (1, 2)
 
-        self.convcorr1 = ConvNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
+        self.convcorr1 = Conv2dNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
         if len(corr_layers) == 2:
-            self.convcorr2 = ConvNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
+            self.convcorr2 = Conv2dNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
         else:
             self.convcorr2 = nn.Identity()
 
-        self.convflow1 = ConvNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
-        self.convflow2 = ConvNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)
+        self.convflow1 = Conv2dNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
+        self.convflow2 = Conv2dNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)
 
         # out_channels - 2 because we cat the flow (2 channels) at the end
-        self.conv = ConvNormActivation(
+        self.conv = Conv2dNormActivation(
             corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3
         )
 
@@ -301,7 +301,7 @@ class MaskPredictor(nn.Module):
 
     def __init__(self, *, in_channels, hidden_size, multiplier=0.25):
         super().__init__()
-        self.convrelu = ConvNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
+        self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
         # 8 * 8 * 9 because the predicted flow is downsampled by 8, from the downsampling of the initial FeatureEncoder
         # and we interpolate with all 9 surrounding neighbors. See paper and appendix B.
         self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0)
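Several of the RAFT layers above pass norm_layer=None, in which case Conv2dNormActivation degenerates to a plain Conv2d followed by the activation (ReLU by default), and the conv keeps its bias. A small sketch of that behavior (shapes are illustrative, not from the diff):

    import torch
    from torchvision.ops import Conv2dNormActivation

    convrelu = Conv2dNormActivation(2, 64, norm_layer=None, kernel_size=7)
    out = convrelu(torch.randn(1, 2, 32, 32))  # padding inferred -> (1, 64, 32, 32)
    assert convrelu[0].bias is not None  # bias defaults to True when there is no norm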

torchvision/models/quantization/mobilenetv2.py

Lines changed: 2 additions & 2 deletions
@@ -6,7 +6,7 @@
 from torchvision.models.mobilenetv2 import InvertedResidual, MobileNetV2, model_urls
 
 from ..._internally_replaced_utils import load_state_dict_from_url
-from ...ops.misc import ConvNormActivation
+from ...ops.misc import Conv2dNormActivation
 from .utils import _fuse_modules, _replace_relu, quantize_model
 
 
@@ -54,7 +54,7 @@ def forward(self, x: Tensor) -> Tensor:
 
     def fuse_model(self, is_qat: Optional[bool] = None) -> None:
         for m in self.modules():
-            if type(m) is ConvNormActivation:
+            if type(m) is Conv2dNormActivation:
                 _fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
             if type(m) is QuantizableInvertedResidual:
                 m.fuse_model(is_qat)
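fuse_model above works because Conv2dNormActivation is an nn.Sequential whose children are indexed "0" (Conv2d), "1" (BatchNorm2d), and "2" (ReLU). A standalone sketch of the same fusion through the public torch.ao.quantization API, assuming eager-mode post-training quantization on an eval() model (not the internal _fuse_modules helper used in the diff):

    import torch
    from torch.ao.quantization import fuse_modules
    from torchvision.ops.misc import Conv2dNormActivation

    block = Conv2dNormActivation(3, 16, kernel_size=3).eval()  # Conv2d + BN + ReLU
    fused = fuse_modules(block, [["0", "1", "2"]])
    # index 0 now holds the fused conv+bn+relu module; indices 1 and 2 become nn.Identity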

torchvision/models/quantization/mobilenetv3.py

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 from torch.ao.quantization import QuantStub, DeQuantStub
 
 from ..._internally_replaced_utils import load_state_dict_from_url
-from ...ops.misc import ConvNormActivation, SqueezeExcitation
+from ...ops.misc import Conv2dNormActivation, SqueezeExcitation
 from ..mobilenetv3 import InvertedResidual, InvertedResidualConfig, MobileNetV3, model_urls, _mobilenet_v3_conf
 from .utils import _fuse_modules, _replace_relu
 
@@ -103,7 +103,7 @@ def forward(self, x: Tensor) -> Tensor:
 
     def fuse_model(self, is_qat: Optional[bool] = None) -> None:
         for m in self.modules():
-            if type(m) is ConvNormActivation:
+            if type(m) is Conv2dNormActivation:
                 modules_to_fuse = ["0", "1"]
                 if len(m) == 3 and type(m[2]) is nn.ReLU:
                     modules_to_fuse.append("2")
