[proto] Small optim for perspective op on images #6907
Changes from 3 commits
@@ -906,6 +906,36 @@ def crop(inpt: features.InputTypeJIT, top: int, left: int, height: int, width: i
         return crop_image_pil(inpt, top, left, height, width)


+def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
+    # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
+    # src/libImaging/Geometry.c#L394
+    #
+    # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
+    # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
+    #
+    theta1 = torch.tensor(
+        [[[coeffs[0], coeffs[1], coeffs[2]], [coeffs[3], coeffs[4], coeffs[5]]]], dtype=dtype, device=device
+    )
+    theta2 = torch.tensor([[[coeffs[6], coeffs[7], 1.0], [coeffs[6], coeffs[7], 1.0]]], dtype=dtype, device=device)
+
+    d = 0.5
+    base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
+    x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
+    base_grid[..., 0].copy_(x_grid)
+    y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
+    base_grid[..., 1].copy_(y_grid)
+    base_grid[..., 2].fill_(1)
+
+    rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
+    output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
+    output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))
+
+    output_grid = output_grid1.div_(output_grid2).sub_(1.0)
+    return output_grid.view(1, oh, ow, 2)
+
+
 def perspective_image_tensor(
     image: torch.Tensor,
     perspective_coeffs: List[float],
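For context: the (1, H, W, 2) tensor returned by _perspective_grid holds normalized sampling coordinates in the convention expected by torch.nn.functional.grid_sample. A minimal standalone sketch of that relationship, assuming the function above is in scope and using hypothetical identity-like coefficients:

    import torch
    import torch.nn.functional as F

    # Hypothetical coefficients for an identity-like perspective (illustration only).
    coeffs = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]

    img = torch.rand(1, 3, 32, 32)
    grid = _perspective_grid(coeffs, ow=32, oh=32, dtype=img.dtype, device=img.device)
    # grid has shape (1, 32, 32, 2) with values roughly in [-1, 1].
    out = F.grid_sample(img, grid, mode="bilinear", padding_mode="zeros", align_corners=False)
    assert out.shape == img.shape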
@@ -923,7 +953,19 @@ def perspective_image_tensor(
     else:
         needs_unsquash = False

-    output = _FT.perspective(image, perspective_coeffs, interpolation=interpolation.value, fill=fill)
+    _FT._assert_grid_transform_inputs(
+        image,
+        matrix=None,
+        interpolation=interpolation.value,
+        fill=fill,
+        supported_interpolation_modes=["nearest", "bilinear"],
+        coeffs=perspective_coeffs,
+    )
+
+    ow, oh = image.shape[-1], image.shape[-2]
+    dtype = image.dtype if torch.is_floating_point(image) else torch.float32
+    grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=image.device)
+    output = _FT._apply_grid_transform(image, grid, interpolation.value, fill=fill)

     if needs_unsquash:
         output = output.reshape(shape)
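A rough usage sketch of the new image code path follows. The import path reflects the prototype layout at the time of this PR and may have moved since; the coefficient values are hypothetical (in practice they typically come from a helper such as torchvision.transforms.functional._get_perspective_coeffs):

    import torch
    from torchvision.prototype.transforms import functional as F

    image = torch.randint(0, 256, (3, 256, 256), dtype=torch.uint8)
    # Hypothetical 8-parameter perspective coefficients.
    coeffs = [0.9, -0.1, 12.0, 0.05, 1.1, -8.0, 1e-4, -2e-4]
    # uint8 inputs take the float32 fallback dtype seen in the diff above.
    out = F.perspective_image_tensor(image, coeffs)
    assert out.shape == image.shape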
@@ -988,16 +1030,16 @@ def perspective_bounding_box(
         (-perspective_coeffs[0] * perspective_coeffs[7] + perspective_coeffs[1] * perspective_coeffs[6]) / denom,
     ]

-    theta12_T = torch.tensor(
-        [
-            [inv_coeffs[0], inv_coeffs[3], inv_coeffs[6], inv_coeffs[6]],
-            [inv_coeffs[1], inv_coeffs[4], inv_coeffs[7], inv_coeffs[7]],
-            [inv_coeffs[2], inv_coeffs[5], 1.0, 1.0],
-        ],
+    theta1 = torch.tensor(
+        [[inv_coeffs[0], inv_coeffs[1], inv_coeffs[2]], [inv_coeffs[3], inv_coeffs[4], inv_coeffs[5]]],
         dtype=dtype,
         device=device,
     )

+    theta2 = torch.tensor(
+        [[inv_coeffs[6], inv_coeffs[7], 1.0], [inv_coeffs[6], inv_coeffs[7], 1.0]], dtype=dtype, device=device
+    )
+
     # 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners).
     # Tensor of points has shape (N * 4, 3), where N is the number of bboxes
     # Single point structure is similar to
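It may help to see why this split is numerically equivalent to the earlier concatenated form: the first two columns of the old single matmul are exactly points @ theta1.T, and the last two (duplicated) columns are points @ theta2.T. A standalone check with hypothetical inverse coefficients:

    import torch

    inv = [0.9, -0.1, 12.0, 0.05, 1.1, -8.0, 1e-4, -2e-4]  # hypothetical values
    points = torch.rand(16, 3)
    points[:, 2] = 1.0  # homogeneous coordinate

    # Old form: one (N*4, 3) x (3, 4) matmul with duplicated denominator columns.
    theta12_T = torch.tensor(
        [
            [inv[0], inv[3], inv[6], inv[6]],
            [inv[1], inv[4], inv[7], inv[7]],
            [inv[2], inv[5], 1.0, 1.0],
        ]
    )
    numer_denom = points @ theta12_T

    # New form: two smaller matmuls, no duplicated columns.
    theta1 = torch.tensor([[inv[0], inv[1], inv[2]], [inv[3], inv[4], inv[5]]])
    theta2 = torch.tensor([[inv[6], inv[7], 1.0], [inv[6], inv[7], 1.0]])
    numer = points @ theta1.T
    denom = points @ theta2.T

    assert torch.allclose(numer_denom[:, :2], numer)
    assert torch.allclose(numer_denom[:, 2:], denom)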
@@ -1008,9 +1050,8 @@ def perspective_bounding_box(
     # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
    # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)

-    numer_denom_points = torch.matmul(points, theta12_T)
-    numer_points = numer_denom_points[:, :2]
-    denom_points = numer_denom_points[:, 2:]
+    numer_points = torch.matmul(points, theta1.T)
+    denom_points = torch.matmul(points, theta2.T)
Comment on lines +1077 to +1078:

Reviewer: Just to understand: previously, when we measured this specific change for bboxes, did it not improve the speed?

vfdev-5: I measured all the changes together: concat + single matmul + in-place ops + aminmax.

Reviewer: IIUC, it improved the speed for bounding boxes, but that PR didn't measure the impact on images. Since bounding box tensors are usually a lot smaller than images, the perf regression for images was not noticed there.

vfdev-5: There was no perf regression for images at all, since they were still using the previous implementation; that measurement was for bboxes only.

Reviewer: OK, if you measured it now and you know it doesn't slow us down, it's fine by me.
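For the speed question above, a minimal timing sketch along the lines of what was presumably measured (hypothetical image size and coefficients; the import path assumes the prototype layout at the time of this PR):

    import torch
    from torch.utils import benchmark

    image = torch.rand(3, 400, 500)
    coeffs = [0.9, -0.1, 12.0, 0.05, 1.1, -8.0, 1e-4, -2e-4]  # hypothetical

    timer = benchmark.Timer(
        stmt="perspective_image_tensor(image, coeffs)",
        setup="from torchvision.prototype.transforms.functional import perspective_image_tensor",
        globals={"image": image, "coeffs": coeffs},
    )
    print(timer.blocked_autorange())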
     transformed_points = numer_points.div_(denom_points)

     # 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
Comment on the division above:

Reviewer: Can we do in-place division?
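The diff at this state already divides in place for the bbox path (transformed_points = numer_points.div_(denom_points)), and _perspective_grid uses the same pattern with output_grid1.div_(output_grid2). A tiny standalone illustration of the in-place pattern:

    import torch

    numer = torch.rand(8, 2)
    denom = torch.rand(8, 1) + 0.5  # kept away from zero for the example

    # div_ writes the quotient into numer's existing storage instead of
    # allocating a new output tensor for the result.
    transformed = numer.div_(denom)
    assert transformed.data_ptr() == numer.data_ptr()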