@@ -40,7 +40,7 @@ def resize_image_tensor(
     antialias: Optional[bool] = None,
 ) -> torch.Tensor:
     new_height, new_width = size
-    old_height, old_width = _FT.get_image_size(image)
+    old_width, old_height = _FT.get_image_size(image)
     num_channels = _FT.get_image_num_channels(image)
     batch_shape = image.shape[:-3]
     return _FT.resize(
@@ -143,7 +143,7 @@ def affine_image_tensor(

     center_f = [0.0, 0.0]
     if center is not None:
-        height, width = get_image_size(img)
+        width, height = get_image_size(img)
         # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
         center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]

@@ -169,7 +169,7 @@ def affine_image_pil(
     # it is visually better to estimate the center without 0.5 offset
     # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
     if center is None:
-        height, width = get_image_size(img)
+        width, height = get_image_size(img)
         center = [width * 0.5, height * 0.5]
     matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)

@@ -186,7 +186,7 @@ def rotate_image_tensor(
 ) -> torch.Tensor:
     center_f = [0.0, 0.0]
     if center is not None:
-        height, width = get_image_size(img)
+        width, height = get_image_size(img)
         # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
         center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]

@@ -262,13 +262,13 @@ def _center_crop_compute_crop_anchor(

 def center_crop_image_tensor(img: torch.Tensor, output_size: List[int]) -> torch.Tensor:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
-    image_height, image_width = get_image_size(img)
+    image_width, image_height = get_image_size(img)

     if crop_height > image_height or crop_width > image_width:
         padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
         img = pad_image_tensor(img, padding_ltrb, fill=0)

-        image_height, image_width = get_image_size(img)
+        image_width, image_height = get_image_size(img)
         if crop_width == image_width and crop_height == image_height:
             return img

@@ -278,13 +278,13 @@ def center_crop_image_tensor(img: torch.Tensor, output_size: List[int]) -> torch

 def center_crop_image_pil(img: PIL.Image.Image, output_size: List[int]) -> PIL.Image.Image:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
-    image_height, image_width = get_image_size(img)
+    image_width, image_height = get_image_size(img)

     if crop_height > image_height or crop_width > image_width:
         padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
         img = pad_image_pil(img, padding_ltrb, fill=0)

-        image_height, image_width = get_image_size(img)
+        image_width, image_height = get_image_size(img)
         if crop_width == image_width and crop_height == image_height:
             return img

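Every hunk above makes the same correction: torchvision's size helpers return `[width, height]`, so the left-hand side of the unpacking must be `(width, height)` as well. A minimal sketch of that ordering, using the public `torchvision.transforms.functional.get_image_size` as a stand-in for the `_FT.get_image_size` / `get_image_size` calls in the diff (a non-square image makes the swap visible):

```python
import torch
from torchvision.transforms.functional import get_image_size

# 3 channels, height 480, width 640 -- non-square on purpose.
img = torch.zeros(3, 480, 640)

width, height = get_image_size(img)  # post-fix unpacking order
assert (width, height) == (640, 480)

height_swapped, width_swapped = get_image_size(img)  # pre-fix unpacking order
assert height_swapped == 640  # the variable named "height" actually holds the width

# Downstream, affine_image_tensor / rotate_image_tensor zip the user-supplied
# (x, y) center with (width, height); with the corrected order the geometric
# image center maps to (0, 0), as the comment in the diff describes.
center = (320.0, 240.0)  # center of the 640x480 image, in (x, y) pixels
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]
assert center_f == [0.0, 0.0]
```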
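The two center-crop hunks matter for the same reason: the pad-before-crop decision compares crop size against image size per axis. A tiny arithmetic sketch of that check (the `needs_padding` helper below is hypothetical, standing in for the `crop_height > image_height or crop_width > image_width` condition in the diff, not a torchvision API):

```python
# Hypothetical stand-in for the condition used in center_crop_image_tensor /
# center_crop_image_pil above.
def needs_padding(crop_height: int, crop_width: int, image_height: int, image_width: int) -> bool:
    return crop_height > image_height or crop_width > image_width

# A 640x480 image (width=640, height=480) and a 600x400 crop (height=600, width=400).
crop_height, crop_width = 600, 400

# Correct ordering: the image is only 480 px tall, so padding is required.
print(needs_padding(crop_height, crop_width, image_height=480, image_width=640))  # True

# Swapped ordering (the pre-fix bug): neither comparison triggers, padding is
# skipped, and the subsequent crop anchor would fall outside the image height.
print(needs_padding(crop_height, crop_width, image_height=640, image_width=480))  # False
```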