     os.path.dirname(os.path.abspath(__file__)), 'assets', 'grace_hopper_517x606.jpg')
 
 
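+# Module-level helpers shared by the ConvertImageDtype tests below: cycle_over
+# yields each element of a sequence together with a list of all remaining
+# elements; int_dtypes / float_dtypes enumerate the dtypes under test.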
+def cycle_over(objs):
+    objs = list(objs)
+    for idx, obj in enumerate(objs):
+        yield obj, objs[:idx] + objs[idx + 1:]
+
+def int_dtypes():
+    yield from iter(
+        (torch.uint8, torch.int8, torch.int16, torch.short, torch.int32, torch.int, torch.int64, torch.long,)
+    )
+
+def float_dtypes():
+    yield from iter((torch.float32, torch.float, torch.float64, torch.double))
+
+
 class Tester(unittest.TestCase):
 
     def test_crop(self):
@@ -502,54 +516,99 @@ def test_to_tensor(self):
         output = trans(img)
         self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
 
-    def test_convert_image_dtype(self):
-        def cycle_over(objs):
-            objs = list(objs)
-            for idx, obj in enumerate(objs):
-                yield obj, objs[:idx] + objs[idx + 1:]
-
-        # dtype_max_value = {
-        #     dtype: 1.0
-        #     for dtype in (torch.float32, torch.float, torch.float64, torch.double)#, torch.bool,)
-        #     # torch.float16 and torch.half are disabled for now since they do not support torch.max
-        #     # See https://github.com/pytorch/pytorch/issues/28623#issuecomment-611379051
-        #     # (torch.float32, torch.float, torch.float64, torch.double, torch.float16, torch.half, torch.bool, )
-        # }
-        dtype_max_value = {}
-        dtype_max_value.update(
-            {
-                dtype: torch.iinfo(dtype).max
-                for dtype in (
-                    torch.uint8,
-                    torch.int8,
-                    torch.int16,
-                    torch.short,
-                    torch.int32,
-                    torch.int,
-                    torch.int64,
-                    torch.long,
-                )
-            }
-        )
+    def test_convert_image_dtype_float_to_float(self):
+        for input_dtype, output_dtypes in cycle_over(float_dtypes()):
+            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
+            for output_dtype in output_dtypes:
+                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
+                    transform = transforms.ConvertImageDtype(output_dtype)
+                    output_image = transform(input_image)
+
+                    actual_min, actual_max = output_image.tolist()
+                    desired_min, desired_max = 0.0, 1.0
+
+                    self.assertAlmostEqual(actual_min, desired_min)
+                    self.assertAlmostEqual(actual_max, desired_max)
+
+    def test_convert_image_dtype_float_to_int(self):
+        for input_dtype in float_dtypes():
+            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
+            for output_dtype in int_dtypes():
+                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
+                    transform = transforms.ConvertImageDtype(output_dtype)
+
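+                    # float32 cannot exactly represent the maximum of int32/int64 (nor
+                    # float64 that of int64), so these conversions are expected to raise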
+                    if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or (
+                        input_dtype == torch.float64 and output_dtype == torch.int64
+                    ):
+                        with self.assertRaises(RuntimeError):
+                            transform(input_image)
+                    else:
+                        output_image = transform(input_image)
 
-        for input_dtype, output_dtypes in cycle_over(dtype_max_value.keys()):
-            input_image = torch.ones(1, dtype=input_dtype) * dtype_max_value[input_dtype]
+                        actual_min, actual_max = output_image.tolist()
+                        desired_min, desired_max = 0, torch.iinfo(output_dtype).max
 
+                        self.assertEqual(actual_min, desired_min)
+                        self.assertEqual(actual_max, desired_max)
+
+    def test_convert_image_dtype_int_to_float(self):
+        for input_dtype in int_dtypes():
+            input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype)
+            for output_dtype in float_dtypes():
+                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
+                    transform = transforms.ConvertImageDtype(output_dtype)
+                    output_image = transform(input_image)
+
+                    actual_min, actual_max = output_image.tolist()
+                    desired_min, desired_max = 0.0, 1.0
+
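+                    # the extrema should be close to 0.0 and 1.0 and, per the bounds
+                    # checks below, must never fall outside of [0.0, 1.0]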
+                    self.assertAlmostEqual(actual_min, desired_min)
+                    self.assertGreaterEqual(actual_min, desired_min)
+                    self.assertAlmostEqual(actual_max, desired_max)
+                    self.assertLessEqual(actual_max, desired_max)
+
+    def test_convert_image_dtype_int_to_int(self):
+        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
+            input_max = torch.iinfo(input_dtype).max
+            input_image = torch.tensor((0, input_max), dtype=input_dtype)
             for output_dtype in output_dtypes:
+                output_max = torch.iinfo(output_dtype).max
+
                 with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                     transform = transforms.ConvertImageDtype(output_dtype)
                     output_image = transform(input_image)
 
-                    actual = output_image.dtype
-                    desired = output_dtype
-                    self.assertEqual(actual, desired)
+                    actual_min, actual_max = output_image.tolist()
+                    desired_min, desired_max = 0, output_max
 
-                    actual = torch.max(output_image).item()
-                    desired = dtype_max_value[output_dtype]
-                    if output_dtype.is_floating_point:
-                        self.assertAlmostEqual(actual, desired)
+                    # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details
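+                    # e.g. for uint8 -> int16: error_term = 1 - (32767 + 1) // (255 + 1) = -127,
+                    # so the expected maximum is 32767 - 127 = 32640 (i.e. 255 * 128)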
+                    if input_max >= output_max:
+                        error_term = 0
                     else:
-                        self.assertEqual(actual, desired)
+                        error_term = 1 - (torch.iinfo(output_dtype).max + 1) // (torch.iinfo(input_dtype).max + 1)
+
+                    self.assertEqual(actual_min, desired_min)
+                    self.assertEqual(actual_max, desired_max + error_term)
+
+    def test_convert_image_dtype_int_to_int_consistency(self):
+        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
+            input_max = torch.iinfo(input_dtype).max
+            input_image = torch.tensor((0, input_max), dtype=input_dtype)
+            for output_dtype in output_dtypes:
+                output_max = torch.iinfo(output_dtype).max
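+                # a round trip can only be lossless for upcasts; downcasts discard
+                # information, so those combinations are skipped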
+                if output_max <= input_max:
+                    continue
+
+                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
+                    transform = transforms.ConvertImageDtype(output_dtype)
+                    inverse_transform = transforms.ConvertImageDtype(input_dtype)
+                    output_image = inverse_transform(transform(input_image))
+
+                    actual_min, actual_max = output_image.tolist()
+                    desired_min, desired_max = 0, input_max
+
+                    self.assertEqual(actual_min, desired_min)
+                    self.assertEqual(actual_max, desired_max)
 
     @unittest.skipIf(accimage is None, 'accimage not available')
     def test_accimage_to_tensor(self):