
Commit 9181496

NicolasHug authored and facebook-github-bot committed
Revert D36324783 for torchvision
Reviewed By: datumbox
Differential Revision: D36413353
fbshipit-source-id: 57a1e27fa08eb7699695feb999ef9e6843524586
1 parent ba189a2 commit 9181496

20 files changed (+35, -30 lines)
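Taken together, the changes below are behavior-preserving: they restore spaces around the ** operator, swap the order of two raw-string prefixes, re-wrap one multi-line expression, and add back two blank lines. As a quick sanity check (a hypothetical snippet, not part of the commit), the two ** spellings parse identically:

    import ast

    # Both spellings produce the same AST; only the formatting differs.
    assert ast.dump(ast.parse("x**2")) == ast.dump(ast.parse("x ** 2"))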

references/optical_flow/utils.py

Lines changed: 2 additions & 2 deletions
@@ -158,7 +158,7 @@ def log_every(self, iterable, print_freq=5, header=None):
 def compute_metrics(flow_pred, flow_gt, valid_flow_mask=None):
 
     epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt()
-    flow_norm = (flow_gt**2).sum(dim=1).sqrt()
+    flow_norm = (flow_gt ** 2).sum(dim=1).sqrt()
 
     if valid_flow_mask is not None:
         epe = epe[valid_flow_mask]

@@ -183,7 +183,7 @@ def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400)
         raise ValueError(f"Gamma should be < 1, got {gamma}.")
 
     # exlude invalid pixels and extremely large diplacements
-    flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
+    flow_norm = torch.sum(flow_gt ** 2, dim=1).sqrt()
     valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)
 
     valid_flow_mask = valid_flow_mask[:, None, :, :]
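For readers skimming the diff, a minimal sketch of what these lines compute (shapes are illustrative, not from the commit):

    import torch

    flow_pred = torch.randn(2, 2, 8, 8)  # batch of 2 flows: (dx, dy) over an 8x8 grid
    flow_gt = torch.randn(2, 2, 8, 8)

    # Per-pixel end-point error, as in compute_metrics above.
    epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt()  # shape (2, 8, 8)

    # Per-pixel ground-truth flow magnitude, the line touched by this hunk.
    flow_norm = (flow_gt ** 2).sum(dim=1).sqrt()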

references/segmentation/utils.py

Lines changed: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ def update(self, a, b):
         with torch.inference_mode():
             k = (a >= 0) & (a < n)
             inds = n * a[k].to(torch.int64) + b[k]
-            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
+            self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
 
     def reset(self):
         self.mat.zero_()
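The touched line is the classic bincount trick for building a confusion matrix: each (ground truth, prediction) pair is flattened to the single index n * a + b and histogrammed. A self-contained sketch with made-up labels:

    import torch

    n = 3  # number of classes (illustrative)
    a = torch.tensor([0, 1, 2, 1])  # ground-truth labels
    b = torch.tensor([0, 2, 2, 1])  # predicted labels

    k = (a >= 0) & (a < n)  # ignore out-of-range labels
    inds = n * a[k].to(torch.int64) + b[k]
    mat = torch.bincount(inds, minlength=n ** 2).reshape(n, n)
    # mat[i, j] counts samples with ground truth i predicted as j.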

test/test_image.py

Lines changed: 1 addition & 1 deletion
@@ -168,7 +168,7 @@ def test_decode_png(img_path, pil_mode, mode):
         img_lpng = _read_png_16(img_path, mode=mode)
         assert img_lpng.dtype == torch.int32
         # PIL converts 16 bits pngs in uint8
-        img_lpng = torch.round(img_lpng / (2**16 - 1) * 255).to(torch.uint8)
+        img_lpng = torch.round(img_lpng / (2 ** 16 - 1) * 255).to(torch.uint8)
     else:
         data = read_file(img_path)
         img_lpng = decode_image(data, mode=mode)
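The touched line rescales the 16-bit sample range to 8 bits to match PIL's behavior. A worked example (values are illustrative):

    import torch

    img16 = torch.tensor([0, 32768, 65535], dtype=torch.int32)
    img8 = torch.round(img16 / (2 ** 16 - 1) * 255).to(torch.uint8)
    # tensor([  0, 128, 255], dtype=torch.uint8)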

test/test_models_detection_negative_samples.py

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ def test_assign_targets_to_proposals(self):
 
         resolution = box_roi_pool.output_size[0]
         representation_size = 1024
-        box_head = TwoMLPHead(4 * resolution**2, representation_size)
+        box_head = TwoMLPHead(4 * resolution ** 2, representation_size)
 
         representation_size = 1024
         box_predictor = FastRCNNPredictor(representation_size, 2)

test/test_onnx.py

Lines changed: 1 addition & 1 deletion
@@ -265,7 +265,7 @@ def _init_test_roi_heads_faster_rcnn(self):
 
         resolution = box_roi_pool.output_size[0]
         representation_size = 1024
-        box_head = TwoMLPHead(out_channels * resolution**2, representation_size)
+        box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
 
         representation_size = 1024
         box_predictor = FastRCNNPredictor(representation_size, num_classes)

test/test_ops.py

Lines changed: 2 additions & 2 deletions
@@ -79,7 +79,7 @@ def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwar
         rois_dtype = self.dtype if rois_dtype is None else rois_dtype
         pool_size = 5
         # n_channels % (pool_size ** 2) == 0 required for PS opeartions.
-        n_channels = 2 * (pool_size**2)
+        n_channels = 2 * (pool_size ** 2)
         x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device)
         if not contiguous:
             x = x.permute(0, 1, 3, 2)

@@ -115,7 +115,7 @@ def test_is_leaf_node(self, device):
     def test_backward(self, seed, device, contiguous):
         torch.random.manual_seed(seed)
         pool_size = 2
-        x = torch.rand(1, 2 * (pool_size**2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
+        x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
         if not contiguous:
             x = x.permute(0, 1, 3, 2)
         rois = torch.tensor(
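As the in-test comment says, position-sensitive (PS) RoI ops split the channel dimension into pool_size x pool_size groups, so the channel count must be divisible by pool_size ** 2. With the test's own values:

    pool_size = 5
    n_channels = 2 * (pool_size ** 2)  # 50
    assert n_channels % (pool_size ** 2) == 0  # required for PS operations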

test/test_utils.py

Lines changed: 2 additions & 2 deletions
@@ -45,8 +45,8 @@ def test_normalize_in_make_grid():
 
     # Rounding the result to one decimal for comparison
     n_digits = 1
-    rounded_grid_max = torch.round(grid_max * 10**n_digits) / (10**n_digits)
-    rounded_grid_min = torch.round(grid_min * 10**n_digits) / (10**n_digits)
+    rounded_grid_max = torch.round(grid_max * 10 ** n_digits) / (10 ** n_digits)
+    rounded_grid_min = torch.round(grid_min * 10 ** n_digits) / (10 ** n_digits)
 
     assert_equal(norm_max, rounded_grid_max, msg="Normalized max is not equal to 1")
     assert_equal(norm_min, rounded_grid_min, msg="Normalized min is not equal to 0")
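The touched lines round to n_digits decimals by scaling up, rounding, and scaling back. A tiny standalone example (values are illustrative):

    import torch

    n_digits = 1
    x = torch.tensor([0.94, 1.06])
    rounded = torch.round(x * 10 ** n_digits) / (10 ** n_digits)  # tensor([0.9000, 1.1000])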

torchvision/datasets/_optical_flow.py

Lines changed: 1 addition & 1 deletion
@@ -467,7 +467,7 @@ def _read_16bits_png_with_flow_and_valid_mask(file_name):
 
     flow_and_valid = _read_png_16(file_name).to(torch.float32)
     flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]
-    flow = (flow - 2**15) / 64  # This conversion is explained somewhere on the kitti archive
+    flow = (flow - 2 ** 15) / 64  # This conversion is explained somewhere on the kitti archive
     valid_flow_mask = valid_flow_mask.bool()
 
     # For consistency with other datasets, we convert to numpy
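KITTI stores flow in 16-bit PNGs as unsigned values offset by 2 ** 15 and scaled by 64, which the touched line inverts. A worked example with one hypothetical raw value:

    # A raw 16-bit value of 32832 decodes to a displacement of +1.0 pixel:
    raw = 32832.0
    flow = (raw - 2 ** 15) / 64  # (32832 - 32768) / 64 == 1.0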

torchvision/extension.py

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@ def _has_ops():
     def _has_ops():  # noqa: F811
         return True
 
+
 except (ImportError, OSError):
     pass

torchvision/io/video.py

Lines changed: 2 additions & 2 deletions
@@ -176,9 +176,9 @@ def _read_from_stream(
         # can't use regex directly because of some weird characters sometimes...
         pos = extradata.find(b"DivX")
         d = extradata[pos:]
-        o = re.search(rb"DivX(\d+)Build(\d+)(\w)", d)
+        o = re.search(br"DivX(\d+)Build(\d+)(\w)", d)
         if o is None:
-            o = re.search(rb"DivX(\d+)b(\d+)(\w)", d)
+            o = re.search(br"DivX(\d+)b(\d+)(\w)", d)
         if o is not None:
             should_buffer = o.group(3) == b"p"
     seek_offset = start_offset
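rb and br spell the same raw bytes literal; Python accepts the prefix characters in either order, so this hunk changes nothing at runtime. A quick check (hypothetical snippet):

    import re

    assert rb"DivX(\d+)Build(\d+)(\w)" == br"DivX(\d+)Build(\d+)(\w)"
    assert re.search(br"DivX(\d+)Build(\d+)(\w)", b"DivX5Build42p") is not None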

torchvision/io/video_reader.py

Lines changed: 1 addition & 0 deletions
@@ -17,6 +17,7 @@
     def _has_video_opt() -> bool:
         return True
 
+
 else:
 
     def _has_video_opt() -> bool:

torchvision/models/detection/faster_rcnn.py

Lines changed: 1 addition & 1 deletion
@@ -250,7 +250,7 @@ def __init__(
         if box_head is None:
             resolution = box_roi_pool.output_size[0]
             representation_size = 1024
-            box_head = TwoMLPHead(out_channels * resolution**2, representation_size)
+            box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
 
         if box_predictor is None:
             representation_size = 1024
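This is the canonical site of the expression also seen in the two test files above: the box head flattens an RoI feature map of shape (out_channels, resolution, resolution), so its input width is out_channels * resolution ** 2. With typical FPN values (illustrative, not from this diff):

    out_channels, resolution = 256, 7
    in_features = out_channels * resolution ** 2  # 256 * 49 == 12544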

torchvision/models/detection/rpn.py

Lines changed: 9 additions & 6 deletions
@@ -322,12 +322,15 @@ def compute_loss(
         labels = torch.cat(labels, dim=0)
         regression_targets = torch.cat(regression_targets, dim=0)
 
-        box_loss = F.smooth_l1_loss(
-            pred_bbox_deltas[sampled_pos_inds],
-            regression_targets[sampled_pos_inds],
-            beta=1 / 9,
-            reduction="sum",
-        ) / (sampled_inds.numel())
+        box_loss = (
+            F.smooth_l1_loss(
+                pred_bbox_deltas[sampled_pos_inds],
+                regression_targets[sampled_pos_inds],
+                beta=1 / 9,
+                reduction="sum",
+            )
+            / (sampled_inds.numel())
+        )
 
         objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])
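Both forms of the re-wrapped expression compute the same value: a summed smooth L1 loss over the positive anchors, normalized by the total number of sampled anchors. A standalone sketch with illustrative tensors:

    import torch
    import torch.nn.functional as F

    pred = torch.randn(8, 4)    # bbox deltas for 8 sampled positive anchors
    target = torch.randn(8, 4)
    num_sampled = 256           # positives + negatives drawn by the sampler

    box_loss = F.smooth_l1_loss(pred, target, beta=1 / 9, reduction="sum") / num_sampled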

torchvision/models/swin_transformer.py

Lines changed: 1 addition & 1 deletion
@@ -326,7 +326,7 @@ def __init__(
         # build SwinTransformer blocks
         for i_stage in range(len(depths)):
             stage: List[nn.Module] = []
-            dim = embed_dim * 2**i_stage
+            dim = embed_dim * 2 ** i_stage
             for i_layer in range(depths[i_stage]):
                 # adjust stochastic depth probability based on the depth of the stage block
                 sd_prob = stochastic_depth_prob * float(stage_block_id) / (total_stage_blocks - 1)
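Each Swin stage doubles the embedding width, which is what 2 ** i_stage encodes. With the Swin-T base width (an illustrative value, not from this diff):

    embed_dim = 96
    dims = [embed_dim * 2 ** i_stage for i_stage in range(4)]  # [96, 192, 384, 768]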

torchvision/ops/boxes.py

Lines changed: 1 addition & 1 deletion
@@ -353,7 +353,7 @@ def complete_box_iou(boxes1: Tensor, boxes2: Tensor, eps: float = 1e-7) -> Tenso
     w_gt = boxes2[:, 2] - boxes2[:, 0]
     h_gt = boxes2[:, 3] - boxes2[:, 1]
 
-    v = (4 / (torch.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
+    v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
     with torch.no_grad():
         alpha = v / (1 - iou + v + eps)
     return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v
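The touched line (identical in ciou_loss.py below) is the aspect-ratio consistency term v of Complete IoU; it vanishes when the predicted and ground-truth boxes share the same aspect ratio. A scalar sketch with made-up box sizes:

    import torch

    w_pred, h_pred = torch.tensor(4.0), torch.tensor(2.0)
    w_gt, h_gt = torch.tensor(3.0), torch.tensor(3.0)
    v = (4 / (torch.pi ** 2)) * torch.pow(torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred), 2)
    # v == 0 only when the two aspect ratios match.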

torchvision/ops/ciou_loss.py

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ def complete_box_iou_loss(
     h_pred = y2 - y1
     w_gt = x2g - x1g
     h_gt = y2g - y1g
-    v = (4 / (torch.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
+    v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
     with torch.no_grad():
         alpha = v / (1 - iou + v + eps)

torchvision/ops/drop_block.py

Lines changed: 2 additions & 2 deletions
@@ -37,7 +37,7 @@ def drop_block2d(
     N, C, H, W = input.size()
     block_size = min(block_size, W, H)
     # compute the gamma of Bernoulli distribution
-    gamma = (p * H * W) / ((block_size**2) * ((H - block_size + 1) * (W - block_size + 1)))
+    gamma = (p * H * W) / ((block_size ** 2) * ((H - block_size + 1) * (W - block_size + 1)))
     noise = torch.empty((N, C, H - block_size + 1, W - block_size + 1), dtype=input.dtype, device=input.device)
     noise.bernoulli_(gamma)
 
@@ -83,7 +83,7 @@ def drop_block3d(
     N, C, D, H, W = input.size()
     block_size = min(block_size, D, H, W)
     # compute the gamma of Bernoulli distribution
-    gamma = (p * D * H * W) / ((block_size**3) * ((D - block_size + 1) * (H - block_size + 1) * (W - block_size + 1)))
+    gamma = (p * D * H * W) / ((block_size ** 3) * ((D - block_size + 1) * (H - block_size + 1) * (W - block_size + 1)))
     noise = torch.empty(
         (N, C, D - block_size + 1, H - block_size + 1, W - block_size + 1), dtype=input.dtype, device=input.device
     )
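The touched lines pick the Bernoulli rate gamma so that dropping block_size-sized blocks at sampled seed positions removes roughly a fraction p of all activations. Plugging in illustrative sizes:

    p, H, W, block_size = 0.1, 32, 32, 5
    gamma = (p * H * W) / ((block_size ** 2) * ((H - block_size + 1) * (W - block_size + 1)))
    # ~0.0052: each valid seed position is dropped with this probability.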

torchvision/prototype/datasets/_builtin/coco.py

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ def _decode_captions_ann(self, anns: List[Dict[str, Any]], image_meta: Dict[str,
     )
 
     _META_FILE_PATTERN = re.compile(
-        rf"(?P<annotations>({'|'.join(_ANN_DECODERS.keys())}))_(?P<split>[a-zA-Z]+)(?P<year>\d+)[.]json"
+        fr"(?P<annotations>({'|'.join(_ANN_DECODERS.keys())}))_(?P<split>[a-zA-Z]+)(?P<year>\d+)[.]json"
    )
 
    def _filter_meta_files(self, data: Tuple[str, Any]) -> bool:
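As with the rb/br change above, rf and fr are the same raw f-string prefix, so only the spelling changes. A quick check (hypothetical snippet):

    split, year = "train", 2017
    assert rf"instances_{split}{year}[.]json" == fr"instances_{split}{year}[.]json"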

torchvision/transforms/functional_tensor.py

Lines changed: 1 addition & 1 deletion
@@ -247,7 +247,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
     if not torch.is_floating_point(img):
         result = convert_image_dtype(result, torch.float32)
 
-    result = (gain * result**gamma).clamp(0, 1)
+    result = (gain * result ** gamma).clamp(0, 1)
 
     result = convert_image_dtype(result, dtype)
     return result
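The touched line is the gamma-correction step: out = gain * img ** gamma on a float image, clamped back to [0, 1]. A minimal sketch (illustrative values):

    import torch

    img = torch.rand(3, 4, 4)  # float image in [0, 1]
    gamma, gain = 2.2, 1.0
    result = (gain * img ** gamma).clamp(0, 1)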

torchvision/utils.py

Lines changed: 3 additions & 3 deletions
@@ -430,7 +430,7 @@ def flow_to_image(flow: torch.Tensor) -> torch.Tensor:
     if flow.ndim != 4 or flow.shape[1] != 2:
         raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.")
 
-    max_norm = torch.sum(flow**2, dim=1).sqrt().max()
+    max_norm = torch.sum(flow ** 2, dim=1).sqrt().max()
     epsilon = torch.finfo((flow).dtype).eps
     normalized_flow = flow / (max_norm + epsilon)
     img = _normalized_flow_to_image(normalized_flow)

@@ -457,7 +457,7 @@ def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:
     flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device)
     colorwheel = _make_colorwheel().to(device)  # shape [55x3]
     num_cols = colorwheel.shape[0]
-    norm = torch.sum(normalized_flow**2, dim=1).sqrt()
+    norm = torch.sum(normalized_flow ** 2, dim=1).sqrt()
     a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi
     fk = (a + 1) / 2 * (num_cols - 1)
     k0 = torch.floor(fk).to(torch.long)

@@ -523,7 +523,7 @@ def _make_colorwheel() -> torch.Tensor:
 
 
 def _generate_color_palette(num_objects: int):
-    palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])
+    palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
     return [tuple((i * palette) % 255) for i in range(num_objects)]
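The palette line derives deterministic per-object colors from three large constants, one per channel. A sketch of the first few colors it generates (hypothetical usage, with tensors unpacked to plain ints for readability):

    import torch

    palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
    colors = [tuple(((i * palette) % 255).tolist()) for i in range(3)]
    # [(0, 0, 0), (1, 127, 31), (2, 254, 62)]: deterministic, index-keyed colors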
528528

529529
