From d077c25502024ba2e3b5ecd07e198d9a24e98a94 Mon Sep 17 00:00:00 2001
From: Toni Blaslov
Date: Wed, 9 Nov 2022 15:57:57 +0000
Subject: [PATCH 01/15] Adding FLOPs and size to model metadata

---
 docs/source/conf.py                           | 41 +++++++----
 test/test_extended_models.py                  | 15 ++--
 torchvision/models/alexnet.py                 | 2 +
 torchvision/models/convnext.py                | 8 +++
 torchvision/models/densenet.py                | 10 ++-
 torchvision/models/detection/faster_rcnn.py   | 8 +++
 torchvision/models/detection/fcos.py          | 2 +
 torchvision/models/detection/keypoint_rcnn.py | 4 ++
 torchvision/models/detection/mask_rcnn.py     | 4 ++
 torchvision/models/detection/retinanet.py     | 4 ++
 torchvision/models/detection/ssd.py           | 2 +
 torchvision/models/detection/ssdlite.py       | 2 +
 torchvision/models/efficientnet.py            | 24 +++++++
 torchvision/models/googlenet.py               | 2 +
 torchvision/models/inception.py               | 2 +
 torchvision/models/maxvit.py                  | 2 +
 torchvision/models/mnasnet.py                 | 8 +++
 torchvision/models/mobilenetv2.py             | 4 ++
 torchvision/models/mobilenetv3.py             | 6 ++
 torchvision/models/optical_flow/raft.py       | 16 +++++
 torchvision/models/regnet.py                  | 68 +++++++++++++++++++
 torchvision/models/resnet.py                  | 34 ++++++++++
 torchvision/models/segmentation/deeplabv3.py  | 6 ++
 torchvision/models/segmentation/fcn.py        | 4 ++
 torchvision/models/segmentation/lraspp.py     | 2 +
 torchvision/models/shufflenetv2.py            | 8 +++
 torchvision/models/squeezenet.py              | 4 ++
 torchvision/models/swin_transformer.py        | 12 ++++
 torchvision/models/vgg.py                     | 18 +++++
 torchvision/models/video/mvit.py              | 4 ++
 torchvision/models/video/resnet.py            | 6 ++
 torchvision/models/video/s3d.py               | 2 +
 torchvision/models/vision_transformer.py      | 20 ++++++
 33 files changed, 335 insertions(+), 19 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 231d3cad416..2c258edf89d 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -375,7 +375,9 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
         lines.append("")
 
 
-def generate_weights_table(module, table_name, metrics, dataset, include_patterns=None, exclude_patterns=None):
+def generate_weights_table(
+    module, table_name, metrics, dataset, include_patterns=None, exclude_patterns=None, exclude_columns=[]
+):
     weights_endswith = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights"
     weight_enums = [getattr(module, name) for name in dir(module) if name.endswith(weights_endswith)]
     weights = [w for weight_enum in weight_enums for w in weight_enum]
@@ -385,27 +387,41 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern
     if exclude_patterns is not None:
         weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)]
 
+    rich_metadata = ["GFLOPs", "Size (MB)"]
+
     metrics_keys, metrics_names = zip(*metrics)
-    column_names = ["Weight"] + list(metrics_names) + ["Params", "Recipe"]
-    column_names = [f"**{name}**" for name in column_names]  # Add bold
+    column_names = ["Weight"] + list(metrics_names) + ["Params"] + rich_metadata + ["Recipe"]  # Final column order
+    column_names_table = [f"**{name}**" for name in column_names if name not in exclude_columns]  # Add bold
+
+    column_patterns = list(
+        zip(
+            column_names,
+            [lambda w: f":class:`{w} <{type(w).__name__}>`"]
+            + [lambda w, metric=m: w.meta["_metrics"][dataset][metric] for m in metrics_keys]
+            + [
+                lambda w: f"{w.meta['num_params']/1e6:.1f}M",
+                lambda w: f"{w.meta['_flops']:.3f}",
+                lambda w: f"{round(w.meta['_weight_size'], 1):.1f}",
+                lambda w: f"`link <{w.meta['recipe']}>`__",
+            ],
+        )
+    )
 
     content = [
-        (
-            f":class:`{w} <{type(w).__name__}>`",
-            *(w.meta["_metrics"][dataset][metric] for metric in metrics_keys),
-            f"{w.meta['num_params']/1e6:.1f}M",
-            f"`link <{w.meta['recipe']}>`__",
-        )
-        for w in weights
+        [pattern(w) for col_name, pattern in column_patterns if col_name not in exclude_columns] for w in weights
    ]
+
+    column_widths = zip(column_names, ["120"] + ["18"] * len(metrics_names) + ["18", "18", "18", "10"])
+    widths_table = " ".join(width for col_name, width in column_widths if col_name not in exclude_columns)
+
+    table = tabulate(content, headers=column_names_table, tablefmt="rst")
 
     generated_dir = Path("generated")
     generated_dir.mkdir(exist_ok=True)
     with open(generated_dir / f"{table_name}_table.rst", "w+") as table_file:
         table_file.write(".. rst-class:: table-weights\n")  # Custom CSS class, see custom_torchvision.css
         table_file.write(".. table::\n")
-        table_file.write(f"    :widths: 100 {'20 ' * len(metrics_names)} 20 10\n\n")
+        table_file.write(f"    :widths: {widths_table} \n\n")
         table_file.write(f"{textwrap.indent(table, ' ' * 4)}\n\n")
@@ -417,6 +433,7 @@ generate_weights_table(
     table_name="classification_quant",
     metrics=[("acc@1", "Acc@1"), ("acc@5", "Acc@5")],
     dataset="ImageNet-1K",
+    exclude_columns=["GFLOPs", "Size (MB)"],
 )
 generate_weights_table(
     module=M.detection,
diff --git a/test/test_extended_models.py b/test/test_extended_models.py
index 2cd8a568113..6948989c9e8 100644
--- a/test/test_extended_models.py
+++ b/test/test_extended_models.py
@@ -155,21 +155,26 @@ def test_schema_meta_validation(model_fn):
         "recipe",
         "unquantized",
         "_docs",
+        "_flops",
+        "_weight_size",
     }
     # mandatory fields for each computer vision task
     classification_fields = {"categories", ("_metrics", "ImageNet-1K", "acc@1"), ("_metrics", "ImageNet-1K", "acc@5")}
+    rich_metadata = {"_flops", "_weight_size"}
     defaults = {
         "all": {"_metrics", "min_size", "num_params", "recipe", "_docs"},
-        "models": classification_fields,
-        "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")},
+        "models": classification_fields | rich_metadata,
+        "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")} | rich_metadata,
         "quantization": classification_fields | {"backend", "unquantized"},
         "segmentation": {
             "categories",
             ("_metrics", "COCO-val2017-VOC-labels", "miou"),
             ("_metrics", "COCO-val2017-VOC-labels", "pixel_acc"),
-        },
-        "video": {"categories", ("_metrics", "Kinetics-400", "acc@1"), ("_metrics", "Kinetics-400", "acc@5")},
-        "optical_flow": set(),
+        }
+        | rich_metadata,
+        "video": {"categories", ("_metrics", "Kinetics-400", "acc@1"), ("_metrics", "Kinetics-400", "acc@5")}
+        | rich_metadata,
+        "optical_flow": rich_metadata,
     }
     model_name = model_fn.__name__
     module_name = model_fn.__module__.split(".")[-2]
diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py
index 328f978ba11..cc8fbb6c761 100644
--- a/torchvision/models/alexnet.py
+++ b/torchvision/models/alexnet.py
@@ -67,6 +67,8 @@ class AlexNet_Weights(WeightsEnum):
                     "acc@5": 79.066,
                 }
             },
+            "_flops": 0.714188,
+            "_weight_size": 233.086501,
             "_docs": """
                 These weights reproduce closely the results of the paper using a simplified training recipe.
             """,
diff --git a/torchvision/models/convnext.py b/torchvision/models/convnext.py
index 025baa3d148..c3a1fb81c7a 100644
--- a/torchvision/models/convnext.py
+++ b/torchvision/models/convnext.py
@@ -219,6 +219,8 @@ class ConvNeXt_Tiny_Weights(WeightsEnum):
                     "acc@5": 96.146,
                 }
             },
+            "_flops": 4.455531,
+            "_weight_size": 109.118672,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -237,6 +239,8 @@ class ConvNeXt_Small_Weights(WeightsEnum):
                     "acc@5": 96.650,
                 }
             },
+            "_flops": 8.683712,
+            "_weight_size": 191.702775,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -255,6 +259,8 @@ class ConvNeXt_Base_Weights(WeightsEnum):
                     "acc@5": 96.870,
                 }
             },
+            "_flops": 15.354729,
+            "_weight_size": 338.064286,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -273,6 +279,8 @@ class ConvNeXt_Large_Weights(WeightsEnum):
                     "acc@5": 96.976,
                 }
             },
+            "_flops": 34.361434,
+            "_weight_size": 754.537187,
         },
     )
     DEFAULT = IMAGENET1K_V1
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index 9aa5ed176a0..cef35b9b47d 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -15,7 +15,6 @@
 from ._meta import _IMAGENET_CATEGORIES
 from ._utils import _ovewrite_named_param, handle_legacy_interface
 
-
 __all__ = [
     "DenseNet",
     "DenseNet121_Weights",
@@ -278,6 +277,8 @@ class DenseNet121_Weights(WeightsEnum):
                     "acc@5": 91.972,
                 }
             },
+            "_flops": 2.834162,
+            "_weight_size": 30.844645,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -296,6 +297,8 @@ class DenseNet161_Weights(WeightsEnum):
                     "acc@5": 93.560,
                 }
             },
+            "_flops": 7.727907,
+            "_weight_size": 110.369482,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -314,6 +317,8 @@ class DenseNet169_Weights(WeightsEnum):
                     "acc@5": 92.806,
                 }
             },
+            "_flops": 3.359843,
+            "_weight_size": 54.708029,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -332,6 +337,8 @@ class DenseNet201_Weights(WeightsEnum):
                     "acc@5": 93.370,
                 }
             },
+            "_flops": 4.291366,
+            "_weight_size": 77.373247,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -444,7 +451,6 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
 # The dictionary below is internal implementation detail and will be removed in v0.15
 from ._utils import _ModelURLs
 
-
 model_urls = _ModelURLs(
     {
         "densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py
index 9d99fd236c7..6cac8d5f589 100644
--- a/torchvision/models/detection/faster_rcnn.py
+++ b/torchvision/models/detection/faster_rcnn.py
@@ -388,6 +388,8 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "box_map": 37.0,
                 }
             },
+            "_flops": 134.379721,
+            "_weight_size": 159.743153,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
@@ -407,6 +409,8 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
                     "box_map": 46.7,
                 }
             },
+            "_flops": 280.370729,
+            "_weight_size": 167.104394,
             "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
         },
     )
@@ -426,6 +430,8 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum):
                     "box_map": 32.8,
                 }
             },
+            "_flops": 4.493592,
+            "_weight_size": 74.238593,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
@@ -445,6 +451,8 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum):
                     "box_map": 22.8,
                 }
             },
+            "_flops": 0.718998,
+            "_weight_size": 74.238593,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py
index 2ac71c339a4..9bc78bba81e 100644
--- a/torchvision/models/detection/fcos.py
+++ b/torchvision/models/detection/fcos.py
@@ -662,6 +662,8 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum):
                     "box_map": 39.2,
                 }
             },
+            "_flops": 128.207053,
+            "_weight_size": 123.607730,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py
index c19dd21a5ce..0a565807bd5 100644
--- a/torchvision/models/detection/keypoint_rcnn.py
+++ b/torchvision/models/detection/keypoint_rcnn.py
@@ -328,6 +328,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "kp_map": 61.1,
                 }
             },
+            "_flops": 133.924041,
+            "_weight_size": 226.053994,
             "_docs": """
                 These weights were produced by following a similar training recipe as on the paper but use a
                 checkpoint from an early epoch.
             """,
     )
@@ -347,6 +349,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "kp_map": 65.0,
                 }
             },
+            "_flops": 137.419502,
+            "_weight_size": 226.053994,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
diff --git a/torchvision/models/detection/mask_rcnn.py b/torchvision/models/detection/mask_rcnn.py
index 795f9b8f79c..511f0137db4 100644
--- a/torchvision/models/detection/mask_rcnn.py
+++ b/torchvision/models/detection/mask_rcnn.py
@@ -370,6 +370,8 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
                     "mask_map": 34.6,
                 }
             },
+            "_flops": 134.379721,
+            "_weight_size": 169.839934,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
@@ -390,6 +392,8 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
                     "mask_map": 41.8,
                 }
             },
+            "_flops": 333.577360,
+            "_weight_size": 177.219453,
             "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
         },
     )
diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py
index ffa21b14f70..4f08d677ddc 100644
--- a/torchvision/models/detection/retinanet.py
+++ b/torchvision/models/detection/retinanet.py
@@ -690,6 +690,8 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum):
                     "box_map": 36.4,
                 }
             },
+            "_flops": 151.540437,
+            "_weight_size": 130.267216,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
@@ -709,6 +711,8 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum):
                     "box_map": 41.5,
                 }
             },
+            "_flops": 152.238199,
+            "_weight_size": 146.037091,
             "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
         },
     )
diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py
index 44102f7ac5a..19d3d9fb6ce 100644
--- a/torchvision/models/detection/ssd.py
+++ b/torchvision/models/detection/ssd.py
@@ -39,6 +39,8 @@ class SSD300_VGG16_Weights(WeightsEnum):
                     "box_map": 25.1,
                 }
             },
+            "_flops": 34.858153,
+            "_weight_size": 135.988447,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py
index d34795d7286..4a4cd833574 100644
--- a/torchvision/models/detection/ssdlite.py
+++ b/torchvision/models/detection/ssdlite.py
@@ -198,6 +198,8 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum):
                     "box_map": 21.3,
                 }
             },
+            "_flops": 0.583172,
+            "_weight_size": 13.417583,
             "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
         },
     )
diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py
index c98eb37f935..1fa7a9907e2 100644
--- a/torchvision/models/efficientnet.py
+++ b/torchvision/models/efficientnet.py
@@ -464,6 +464,8 @@ class EfficientNet_B0_Weights(WeightsEnum):
                     "acc@5": 93.532,
                 }
             },
+            "_flops": 0.385815,
+            "_weight_size": 20.450974,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -486,6 +488,8 @@ class EfficientNet_B1_Weights(WeightsEnum):
                     "acc@5": 94.186,
                 }
             },
+            "_flops": 0.686803,
+            "_weight_size": 30.133798,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -504,6 +508,8 @@ class EfficientNet_B1_Weights(WeightsEnum):
                     "acc@5": 94.934,
                 }
             },
+            "_flops": 0.686803,
+            "_weight_size": 30.136422,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -530,6 +536,8 @@ class EfficientNet_B2_Weights(WeightsEnum):
                     "acc@5": 95.310,
                 }
             },
+            "_flops": 1.087529,
+            "_weight_size": 35.173593,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -552,6 +560,8 @@ class EfficientNet_B3_Weights(WeightsEnum):
                     "acc@5": 96.054,
                 }
             },
+            "_flops": 1.827141,
+            "_weight_size": 47.183904,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -574,6 +584,8 @@ class EfficientNet_B4_Weights(WeightsEnum):
                     "acc@5": 96.594,
                 }
             },
+            "_flops": 4.393771,
+            "_weight_size": 74.489011,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -596,6 +608,8 @@ class EfficientNet_B5_Weights(WeightsEnum):
                     "acc@5": 96.628,
                 }
             },
+            "_flops": 10.266385,
+            "_weight_size": 116.863912,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -618,6 +632,8 @@ class EfficientNet_B6_Weights(WeightsEnum):
                     "acc@5": 96.916,
                 }
             },
+            "_flops": 19.067594,
+            "_weight_size": 165.361524,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -640,6 +656,8 @@ class EfficientNet_B7_Weights(WeightsEnum):
                     "acc@5": 96.908,
                 }
             },
+            "_flops": 37.745884,
+            "_weight_size": 254.675393,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
@@ -664,6 +682,8 @@ class EfficientNet_V2_S_Weights(WeightsEnum):
                     "acc@5": 96.878,
                 }
             },
+            "_flops": 8.365617,
+            "_weight_size": 82.703832,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -692,6 +712,8 @@ class EfficientNet_V2_M_Weights(WeightsEnum):
                     "acc@5": 97.156,
                 }
             },
+            "_flops": 24.582460,
+            "_weight_size": 208.010186,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -723,6 +745,8 @@ class EfficientNet_V2_L_Weights(WeightsEnum):
                     "acc@5": 97.788,
                 }
             },
+            "_flops": 56.079699,
+            "_weight_size": 454.573071,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index 0ea3dd5d0b9..3172bf52b9d 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -290,6 +290,8 @@ class GoogLeNet_Weights(WeightsEnum):
                     "acc@5": 89.530,
                 }
             },
+            "_flops": 1.498376,
+            "_weight_size": 49.731288,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py
index 928c07ac843..f73e9965128 100644
--- a/torchvision/models/inception.py
+++ b/torchvision/models/inception.py
@@ -422,6 +422,8 @@ class Inception_V3_Weights(WeightsEnum):
                     "acc@5": 93.450,
                 }
             },
+            "_flops": 5.713216,
+            "_weight_size": 103.902575,
             "_docs": """These weights are ported from the original paper.""",
         },
     )
diff --git a/torchvision/models/maxvit.py b/torchvision/models/maxvit.py
index 7bf92876385..32cd643187c 100644
--- a/torchvision/models/maxvit.py
+++ b/torchvision/models/maxvit.py
@@ -785,6 +785,8 @@ class MaxVit_T_Weights(WeightsEnum):
                     "acc@5": 96.722,
                 }
             },
+            "_flops": 5.558047,
+            "_weight_size": 118.769322,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py
index 48103f11585..46e99ec64b7 100644
--- a/torchvision/models/mnasnet.py
+++ b/torchvision/models/mnasnet.py
@@ -231,6 +231,8 @@ class MNASNet0_5_Weights(WeightsEnum):
                     "acc@5": 87.490,
                 }
             },
+            "_flops": 0.104456,
+            "_weight_size": 8.591165,
             "_docs": """These weights reproduce closely the results of the paper.""",
         },
     )
@@ -251,6 +253,8 @@ class MNASNet0_75_Weights(WeightsEnum):
                     "acc@5": 90.496,
                 }
             },
+            "_flops": 0.215493,
+            "_weight_size": 12.302564,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's
                 `new training recipe `_.
@@ -273,6 +277,8 @@ class MNASNet1_0_Weights(WeightsEnum):
                     "acc@5": 91.510,
                 }
             },
+            "_flops": 0.314416,
+            "_weight_size": 16.915318,
             "_docs": """These weights reproduce closely the results of the paper.""",
         },
     )
@@ -293,6 +299,8 @@ class MNASNet1_3_Weights(WeightsEnum):
                     "acc@5": 93.522,
                 }
             },
+            "_flops": 0.526362,
+            "_weight_size": 24.245618,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's
                 `new training recipe `_.
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py
index 86b659ebd05..d87f771c640 100644
--- a/torchvision/models/mobilenetv2.py
+++ b/torchvision/models/mobilenetv2.py
@@ -194,6 +194,8 @@ class MobileNet_V2_Weights(WeightsEnum):
                     "acc@5": 90.286,
                 }
             },
+            "_flops": 0.300774,
+            "_weight_size": 13.554546,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -209,6 +211,8 @@ class MobileNet_V2_Weights(WeightsEnum):
                     "acc@5": 90.822,
                 }
             },
+            "_flops": 0.300774,
+            "_weight_size": 13.598035,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index 715fc822ed3..1e8814b85eb 100644
--- a/torchvision/models/mobilenetv3.py
+++ b/torchvision/models/mobilenetv3.py
@@ -307,6 +307,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum):
                     "acc@5": 91.340,
                 }
             },
+            "_flops": 0.216590,
+            "_weight_size": 21.113799,
             "_docs": """These weights were trained from scratch by using a simple training recipe.""",
         },
     )
@@ -323,6 +325,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum):
                     "acc@5": 92.566,
                 }
             },
+            "_flops": 0.216590,
+            "_weight_size": 21.106828,
             "_docs": """
                 These weights improve marginally upon the results of the original paper by using a modified
                 version of TorchVision's `new training recipe
@@ -347,6 +351,8 @@ class MobileNet_V3_Small_Weights(WeightsEnum):
                     "acc@5": 87.402,
                 }
             },
+            "_flops": 0.056510,
+            "_weight_size": 9.829093,
             "_docs": """
                 These weights improve upon the results of the original paper by using a simple training recipe.
             """,
diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py
index 1773f3d5983..d063bf55650 100644
--- a/torchvision/models/optical_flow/raft.py
+++ b/torchvision/models/optical_flow/raft.py
@@ -552,6 +552,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 2.7894},
                 "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
             },
+            "_flops": 211.007046,
+            "_weight_size": 20.128829,
             "_docs": """These weights were ported from the original paper. They are
                 trained on :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",
@@ -570,6 +572,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 2.7161},
                 "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
             },
+            "_flops": 211.007046,
+            "_weight_size": 20.128829,
             "_docs": """These weights were trained from scratch on
                 :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",
@@ -588,6 +592,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Test-Cleanpass": {"epe": 1.94},
                 "Sintel-Test-Finalpass": {"epe": 3.18},
             },
+            "_flops": 211.007046,
+            "_weight_size": 20.128829,
             "_docs": """
                 These weights were ported from the original paper. They are
                 trained on :class:`~torchvision.datasets.FlyingChairs` +
@@ -612,6 +618,8 @@ class Raft_Large_Weights(WeightsEnum):
                 "Sintel-Test-Cleanpass": {"epe": 1.819},
                 "Sintel-Test-Finalpass": {"epe": 3.067},
             },
+            "_flops": 211.007046,
+            "_weight_size": 20.128829,
             "_docs": """
                 These weights were trained from scratch. They are
                 pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
@@ -636,6 +644,8 @@ class Raft_Large_Weights(WeightsEnum):
             "_metrics": {
                 "Kitti-Test": {"fl_all": 5.10},
             },
+            "_flops": 211.007046,
+            "_weight_size": 20.128829,
             "_docs": """
                 These weights were ported from the original paper. They are
                 pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
@@ -657,6 +667,8 @@ class Raft_Large_Weights(WeightsEnum):
             "_metrics": {
                 "Kitti-Test": {"fl_all": 5.19},
             },
+            "_flops": 211.007046,
+            "_weight_size": 20.128829,
             "_docs": """
                 These weights were trained from scratch. They are
                 pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
@@ -698,6 +710,8 @@ class Raft_Small_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 3.2790},
                 "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
             },
+            "_flops": 47.655158,
+            "_weight_size": 3.820600,
             "_docs": """These weights were ported from the original paper. They are
                 trained on :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",
@@ -715,6 +729,8 @@ class Raft_Small_Weights(WeightsEnum):
                 "Sintel-Train-Finalpass": {"epe": 3.2831},
                 "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
             },
+            "_flops": 47.655158,
+            "_weight_size": 3.820600,
             "_docs": """These weights were trained from scratch on
                 :class:`~torchvision.datasets.FlyingChairs` +
                 :class:`~torchvision.datasets.FlyingThings3D`.""",
diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py
index 866e62c164d..77e770cc04e 100644
--- a/torchvision/models/regnet.py
+++ b/torchvision/models/regnet.py
@@ -428,6 +428,8 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
                     "acc@5": 91.716,
                 }
             },
+            "_flops": 0.401843,
+            "_weight_size": 16.805909,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -444,6 +446,8 @@ class RegNet_Y_400MF_Weights(WeightsEnum):
                     "acc@5": 92.742,
                 }
             },
+            "_flops": 0.401843,
+            "_weight_size": 16.805909,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -468,6 +472,8 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
                     "acc@5": 93.136,
                 }
             },
+            "_flops": 0.833856,
+            "_weight_size": 24.773644,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -484,6 +490,8 @@ class RegNet_Y_800MF_Weights(WeightsEnum):
                     "acc@5": 94.502,
                 }
             },
+            "_flops": 0.833856,
+            "_weight_size": 24.773522,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -508,6 +516,8 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
                     "acc@5": 93.966,
                 }
             },
+            "_flops": 1.612076,
+            "_weight_size": 43.152310,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -524,6 +534,8 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum):
                     "acc@5": 95.444,
                 }
             },
+            "_flops": 1.612076,
+            "_weight_size": 43.152310,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -548,6 +560,8 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
                     "acc@5": 94.576,
                 }
             },
+            "_flops": 3.176483,
+            "_weight_size": 74.566991,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -564,6 +578,8 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum):
                     "acc@5": 95.972,
                 }
             },
+            "_flops": 3.176483,
+            "_weight_size": 74.566991,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -588,6 +604,8 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
                     "acc@5": 95.048,
                 }
             },
+            "_flops": 8.473071,
+            "_weight_size": 150.701436,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -604,6 +622,8 @@ class RegNet_Y_8GF_Weights(WeightsEnum):
                     "acc@5": 96.330,
                 }
             },
+            "_flops": 8.473071,
+            "_weight_size": 150.701436,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -628,6 +648,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
                     "acc@5": 95.240,
                 }
             },
+            "_flops": 15.911510,
+            "_weight_size": 319.490335,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -644,6 +666,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
                     "acc@5": 96.328,
                 }
             },
+            "_flops": 15.911510,
+            "_weight_size": 319.490335,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -665,6 +689,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
                     "acc@5": 98.054,
                 }
             },
+            "_flops": 46.734897,
+            "_weight_size": 319.490335,
             "_docs": """
                 These weights are learnt via transfer learning by end-to-end fine-tuning the original
                 `SWAG `_ weights on ImageNet-1K data.
@@ -686,6 +712,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum):
                     "acc@5": 97.244,
                 }
             },
+            "_flops": 15.911510,
+            "_weight_size": 319.490335,
             "_docs": """
                 These weights are composed of the original frozen `SWAG `_ trunk weights and a linear
                 classifier learnt on top of them trained on ImageNet-1K data.
@@ -709,6 +737,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
                     "acc@5": 95.340,
                 }
             },
+            "_flops": 32.279553,
+            "_weight_size": 554.076371,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -725,6 +755,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
                     "acc@5": 96.498,
                 }
             },
+            "_flops": 32.279553,
+            "_weight_size": 554.076371,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -746,6 +778,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
                     "acc@5": 98.362,
                 }
             },
+            "_flops": 94.826458,
+            "_weight_size": 554.076371,
             "_docs": """
                 These weights are learnt via transfer learning by end-to-end fine-tuning the original
                 `SWAG `_ weights on ImageNet-1K data.
@@ -767,6 +801,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum):
                     "acc@5": 97.480,
                 }
             },
+            "_flops": 32.279553,
+            "_weight_size": 554.076371,
             "_docs": """
                 These weights are composed of the original frozen `SWAG `_ trunk weights and a linear
                 classifier learnt on top of them trained on ImageNet-1K data.
@@ -791,6 +827,8 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
                     "acc@5": 98.682,
                 }
             },
+            "_flops": 374.570006,
+            "_weight_size": 2461.563993,
             "_docs": """
                 These weights are learnt via transfer learning by end-to-end fine-tuning the original
                 `SWAG `_ weights on ImageNet-1K data.
@@ -812,6 +850,8 @@ class RegNet_Y_128GF_Weights(WeightsEnum):
                     "acc@5": 97.844,
                 }
             },
+            "_flops": 127.517816,
+            "_weight_size": 2461.563993,
             "_docs": """
                 These weights are composed of the original frozen `SWAG `_ trunk weights and a linear
                 classifier learnt on top of them trained on ImageNet-1K data.
@@ -835,6 +875,8 @@ class RegNet_X_400MF_Weights(WeightsEnum):
                     "acc@5": 90.950,
                 }
             },
+            "_flops": 0.413813,
+            "_weight_size": 21.258035,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -851,6 +893,8 @@ class RegNet_X_400MF_Weights(WeightsEnum):
                     "acc@5": 92.322,
                 }
             },
+            "_flops": 0.413813,
+            "_weight_size": 21.258035,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -875,6 +919,8 @@ class RegNet_X_800MF_Weights(WeightsEnum):
                     "acc@5": 92.348,
                 }
             },
+            "_flops": 0.799700,
+            "_weight_size": 27.945130,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -891,6 +937,8 @@ class RegNet_X_800MF_Weights(WeightsEnum):
                     "acc@5": 93.826,
                 }
             },
+            "_flops": 0.799700,
+            "_weight_size": 27.945130,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -915,6 +963,8 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
                     "acc@5": 93.440,
                 }
             },
+            "_flops": 1.602850,
+            "_weight_size": 35.339471,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -931,6 +981,8 @@ class RegNet_X_1_6GF_Weights(WeightsEnum):
                     "acc@5": 94.922,
                 }
             },
+            "_flops": 1.602850,
+            "_weight_size": 35.339471,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -955,6 +1007,8 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
                     "acc@5": 93.992,
                 }
             },
+            "_flops": 3.176622,
+            "_weight_size": 58.755979,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -971,6 +1025,8 @@ class RegNet_X_3_2GF_Weights(WeightsEnum):
                     "acc@5": 95.430,
                 }
             },
+            "_flops": 3.176622,
+            "_weight_size": 58.755979,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -995,6 +1051,8 @@ class RegNet_X_8GF_Weights(WeightsEnum):
                     "acc@5": 94.686,
                 }
             },
+            "_flops": 7.995132,
+            "_weight_size": 151.455937,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -1011,6 +1069,8 @@ class RegNet_X_8GF_Weights(WeightsEnum):
                     "acc@5": 95.678,
                 }
             },
+            "_flops": 7.995132,
+            "_weight_size": 151.455937,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -1035,6 +1095,8 @@ class RegNet_X_16GF_Weights(WeightsEnum):
                     "acc@5": 94.944,
                 }
             },
+            "_flops": 15.940755,
+            "_weight_size": 207.627419,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -1051,6 +1113,8 @@ class RegNet_X_16GF_Weights(WeightsEnum):
                     "acc@5": 96.196,
                 }
             },
+            "_flops": 15.940755,
+            "_weight_size": 207.627419,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
@@ -1075,6 +1139,8 @@ class RegNet_X_32GF_Weights(WeightsEnum):
                     "acc@5": 95.248,
                 }
             },
+            "_flops": 31.735930,
+            "_weight_size": 412.039433,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -1091,6 +1157,8 @@ class RegNet_X_32GF_Weights(WeightsEnum):
                     "acc@5": 96.288,
                 }
             },
+            "_flops": 31.735930,
+            "_weight_size": 412.039433,
             "_docs": """
                 These weights improve upon the results of the original paper by using a modified version of
                 TorchVision's `new training recipe
diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index dbf14463eaf..11291240035 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -323,6 +323,8 @@ class ResNet18_Weights(WeightsEnum):
                     "acc@5": 89.078,
                 }
             },
+            "_flops": 1.814073,
+            "_weight_size": 44.661113,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -343,6 +345,8 @@ class ResNet34_Weights(WeightsEnum):
                     "acc@5": 91.420,
                 }
             },
+            "_flops": 3.663761,
+            "_weight_size": 83.274669,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -363,6 +367,8 @@ class ResNet50_Weights(WeightsEnum):
                     "acc@5": 92.862,
                 }
             },
+            "_flops": 4.089184,
+            "_weight_size": 97.780545,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -379,6 +385,8 @@ class ResNet50_Weights(WeightsEnum):
                     "acc@5": 95.434,
                 }
             },
+            "_flops": 4.089184,
+            "_weight_size": 97.780545,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
@@ -402,6 +410,8 @@ class ResNet101_Weights(WeightsEnum):
                     "acc@5": 93.546,
                 }
             },
+            "_flops": 7.801405,
+            "_weight_size": 170.511188,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -418,6 +428,8 @@ class ResNet101_Weights(WeightsEnum):
                     "acc@5": 95.780,
                 }
             },
+            "_flops": 7.801405,
+            "_weight_size": 170.530362,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
@@ -441,6 +453,8 @@ class ResNet152_Weights(WeightsEnum):
                     "acc@5": 94.046,
                 }
             },
+            "_flops": 11.513627,
+            "_weight_size": 230.434152,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -457,6 +471,8 @@ class ResNet152_Weights(WeightsEnum):
                     "acc@5": 96.002,
                 }
             },
+            "_flops": 11.513627,
+            "_weight_size": 230.473687,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
@@ -480,6 +496,8 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
                     "acc@5": 93.698,
                 }
             },
+            "_flops": 4.230480,
+            "_weight_size": 95.788646,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -496,6 +514,8 @@ class ResNeXt50_32X4D_Weights(WeightsEnum):
                     "acc@5": 95.340,
                 }
             },
+            "_flops": 4.230480,
+            "_weight_size": 95.833192,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
@@ -519,6 +539,8 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
                     "acc@5": 94.526,
                 }
             },
+            "_flops": 16.414015,
+            "_weight_size": 339.586349,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -535,6 +557,8 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
                     "acc@5": 96.228,
                 }
             },
+            "_flops": 16.414015,
+            "_weight_size": 339.586349,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
@@ -558,6 +582,8 @@ class ResNeXt101_64X4D_Weights(WeightsEnum):
                     "acc@5": 96.454,
                 }
             },
+            "_flops": 15.460270,
+            "_weight_size": 319.317594,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's
                 `new training recipe `_.
@@ -581,6 +607,8 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
                     "acc@5": 94.086,
                 }
             },
+            "_flops": 11.398021,
+            "_weight_size": 131.820194,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -597,6 +625,8 @@ class Wide_ResNet50_2_Weights(WeightsEnum):
                     "acc@5": 95.758,
                 }
             },
+            "_flops": 11.398021,
+            "_weight_size": 263.124207,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
@@ -620,6 +650,8 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
                     "acc@5": 94.284,
                 }
             },
+            "_flops": 22.753051,
+            "_weight_size": 242.896219,
             "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
         },
     )
@@ -636,6 +668,8 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
                     "acc@5": 96.020,
                 }
             },
+            "_flops": 22.753051,
+            "_weight_size": 484.747281,
             "_docs": """
                 These weights improve upon the results of the original paper by using TorchVision's
                 `new training recipe `_.
diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py
index 29ab0154807..dc5e5b2be55 100644
--- a/torchvision/models/segmentation/deeplabv3.py
+++ b/torchvision/models/segmentation/deeplabv3.py
@@ -152,6 +152,8 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum):
                     "pixel_acc": 92.4,
                 }
             },
+            "_flops": 178.721945,
+            "_weight_size": 160.514977,
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
@@ -171,6 +173,8 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum):
                     "pixel_acc": 92.4,
                 }
             },
+            "_flops": 258.743039,
+            "_weight_size": 233.216800,
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
@@ -190,6 +194,8 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
                     "pixel_acc": 91.2,
                 }
             },
+            "_flops": 10.452448,
+            "_weight_size": 42.301330,
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
diff --git a/torchvision/models/segmentation/fcn.py b/torchvision/models/segmentation/fcn.py
index 6f1c9c4b80b..046c85b4d18 100644
--- a/torchvision/models/segmentation/fcn.py
+++ b/torchvision/models/segmentation/fcn.py
@@ -71,6 +71,8 @@ class FCN_ResNet50_Weights(WeightsEnum):
                     "pixel_acc": 91.4,
                 }
             },
+            "_flops": 152.716512,
+            "_weight_size": 135.009211,
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
@@ -90,6 +92,8 @@ class FCN_ResNet101_Weights(WeightsEnum):
                     "pixel_acc": 91.9,
                 }
             },
+            "_flops": 232.737606,
+            "_weight_size": 207.711034,
         },
     )
     DEFAULT = COCO_WITH_VOC_LABELS_V1
diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py
index 44c96f1c272..c0ec738d786 100644
--- a/torchvision/models/segmentation/lraspp.py
+++ b/torchvision/models/segmentation/lraspp.py
@@ -108,6 +108,8 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum):
                     "pixel_acc": 91.2,
                 }
             },
+            "_flops": 2.086220,
+            "_weight_size": 12.490331,
             "_docs": """
                 These weights were trained on a subset of COCO, using only the 20 categories that are present in the
                 Pascal VOC dataset.
diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py
index 159e1be3bc8..d91353a442d 100644
--- a/torchvision/models/shufflenetv2.py
+++ b/torchvision/models/shufflenetv2.py
@@ -204,6 +204,8 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
                     "acc@5": 81.746,
                 }
             },
+            "_flops": 0.040476,
+            "_weight_size": 5.281570,
             "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
         },
     )
@@ -224,6 +226,8 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum):
                     "acc@5": 88.316,
                 }
             },
+            "_flops": 0.144908,
+            "_weight_size": 8.791250,
             "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
         },
     )
@@ -244,6 +248,8 @@ class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
                     "acc@5": 91.086,
                 }
             },
+            "_flops": 0.295759,
+            "_weight_size": 13.557034,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's
                 `new training recipe `_.
@@ -267,6 +273,8 @@ class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
                     "acc@5": 93.006,
                 }
             },
+            "_flops": 0.583253,
+            "_weight_size": 28.432767,
             "_docs": """
                 These weights were trained from scratch by using TorchVision's
                 `new training recipe `_.
diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py
index 9fe6521e1a1..ca6c144f35d 100644
--- a/torchvision/models/squeezenet.py
+++ b/torchvision/models/squeezenet.py
@@ -135,6 +135,8 @@ class SqueezeNet1_0_Weights(WeightsEnum):
                     "acc@5": 80.420,
                 }
             },
+            "_flops": 0.818925,
+            "_weight_size": 4.778434,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -154,6 +156,8 @@ class SqueezeNet1_1_Weights(WeightsEnum):
                     "acc@5": 80.624,
                 }
             },
+            "_flops": 0.349152,
+            "_weight_size": 4.729117,
         },
     )
     DEFAULT = IMAGENET1K_V1
diff --git a/torchvision/models/swin_transformer.py b/torchvision/models/swin_transformer.py
index 64714c3afac..c5d7d8d04f8 100644
--- a/torchvision/models/swin_transformer.py
+++ b/torchvision/models/swin_transformer.py
@@ -660,6 +660,8 @@ class Swin_T_Weights(WeightsEnum):
                     "acc@5": 95.776,
                 }
             },
+            "_flops": 4.490567,
+            "_weight_size": 108.190383,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
@@ -683,6 +685,8 @@ class Swin_S_Weights(WeightsEnum):
                     "acc@5": 96.360,
                 }
             },
+            "_flops": 8.740875,
+            "_weight_size": 189.786254,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
@@ -706,6 +710,8 @@ class Swin_B_Weights(WeightsEnum):
                     "acc@5": 96.640,
                 }
             },
+            "_flops": 15.430947,
+            "_weight_size": 335.363585,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
@@ -729,6 +735,8 @@ class Swin_V2_T_Weights(WeightsEnum):
                     "acc@5": 96.132,
                 }
             },
+            "_flops": 5.939690,
+            "_weight_size": 108.625840,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
@@ -752,6 +760,8 @@ class Swin_V2_S_Weights(WeightsEnum):
                     "acc@5": 96.816,
                 }
             },
+            "_flops": 11.545857,
+            "_weight_size": 190.674577,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
@@ -775,6 +785,8 @@ class Swin_V2_B_Weights(WeightsEnum):
                     "acc@5": 96.864,
                 }
             },
+            "_flops": 20.325134,
+            "_weight_size": 336.371781,
             "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""",
         },
     )
diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index dea783c2fb1..8bb5b806b45 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -127,6 +127,8 @@ class VGG11_Weights(WeightsEnum):
                     "acc@5": 88.628,
                 }
             },
+            "_flops": 7.609090,
+            "_weight_size": 506.840077,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -145,6 +147,8 @@ class VGG11_BN_Weights(WeightsEnum):
                     "acc@5": 89.810,
                 }
             },
+            "_flops": 7.609090,
+            "_weight_size": 506.881400,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -163,6 +167,8 @@ class VGG13_Weights(WeightsEnum):
                     "acc@5": 89.246,
                 }
             },
+            "_flops": 11.308466,
+            "_weight_size": 507.545068,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -181,6 +187,8 @@ class VGG13_BN_Weights(WeightsEnum):
                     "acc@5": 90.374,
                 }
             },
+            "_flops": 11.308466,
+            "_weight_size": 507.589627,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -199,6 +207,8 @@ class VGG16_Weights(WeightsEnum):
                     "acc@5": 90.382,
                 }
             },
+            "_flops": 15.470264,
+            "_weight_size": 527.795678,
         },
     )
     IMAGENET1K_FEATURES = Weights(
@@ -221,6 +231,8 @@ class VGG16_Weights(WeightsEnum):
                     "acc@5": float("nan"),
                 }
             },
+            "_flops": 15.470264,
+            "_weight_size": 527.801824,
             "_docs": """
                 These weights can't be used for classification because they are missing values in the `classifier`
                 module. Only the `features` module has valid values and can be used for feature extraction. The weights
@@ -244,6 +256,8 @@ class VGG16_BN_Weights(WeightsEnum):
                     "acc@5": 91.516,
                 }
             },
+            "_flops": 15.470264,
+            "_weight_size": 527.866207,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -262,6 +276,8 @@ class VGG19_Weights(WeightsEnum):
                     "acc@5": 90.876,
                 }
             },
+            "_flops": 19.632062,
+            "_weight_size": 548.051225,
         },
     )
     DEFAULT = IMAGENET1K_V1
@@ -280,6 +296,8 @@ class VGG19_BN_Weights(WeightsEnum):
                     "acc@5": 91.842,
                 }
             },
+            "_flops": 19.632062,
+            "_weight_size": 548.142819,
         },
     )
     DEFAULT = IMAGENET1K_V1
diff --git a/torchvision/models/video/mvit.py b/torchvision/models/video/mvit.py
index 1b5118b53f5..42be6470066 100644
--- a/torchvision/models/video/mvit.py
+++ b/torchvision/models/video/mvit.py
@@ -624,6 +624,8 @@ class MViT_V1_B_Weights(WeightsEnum):
                     "acc@5": 93.582,
                 }
             },
+            "_flops": 70.599408,
+            "_weight_size": 139.764235,
         },
     )
     DEFAULT = KINETICS400_V1
@@ -655,6 +657,8 @@ class MViT_V2_S_Weights(WeightsEnum):
                     "acc@5": 94.665,
                 }
             },
+            "_flops": 64.223816,
+            "_weight_size": 131.883704,
         },
     )
     DEFAULT = KINETICS400_V1
diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py
index 352ae92d194..b0873f50bd0 100644
--- a/torchvision/models/video/resnet.py
+++ b/torchvision/models/video/resnet.py
@@ -332,6 +332,8 @@ class R3D_18_Weights(WeightsEnum):
                     "acc@5": 83.479,
                 }
             },
+            "_flops": 40.696553,
+            "_weight_size": 127.359406,
         },
     )
     DEFAULT = KINETICS400_V1
@@ -350,6 +352,8 @@ class MC3_18_Weights(WeightsEnum):
                     "acc@5": 84.130,
                 }
             },
+            "_flops": 43.342635,
+            "_weight_size": 44.671906,
         },
     )
     DEFAULT = KINETICS400_V1
@@ -368,6 +372,8 @@ class R2Plus1D_18_Weights(WeightsEnum):
                     "acc@5": 86.175,
                 }
             },
+            "_flops": 40.519081,
+            "_weight_size": 120.318409,
         },
     )
     DEFAULT = KINETICS400_V1
diff --git a/torchvision/models/video/s3d.py b/torchvision/models/video/s3d.py
index 53e3e841a27..64df9eed469 100644
--- a/torchvision/models/video/s3d.py
+++ b/torchvision/models/video/s3d.py
@@ -175,6 +175,8 @@ class S3D_Weights(WeightsEnum):
                     "acc@5": 88.050,
                 }
             },
+            "_flops": 17.978771,
+            "_weight_size": 31.971929,
         },
     )
     DEFAULT = KINETICS400_V1
diff --git a/torchvision/models/vision_transformer.py b/torchvision/models/vision_transformer.py
index be62ce1ce96..6e042097098 100644
--- a/torchvision/models/vision_transformer.py
+++ b/torchvision/models/vision_transformer.py
@@ -363,6 +363,8 @@ class ViT_B_16_Weights(WeightsEnum):
                     "acc@5": 95.318,
                 }
             },
+            "_flops": 17.563828,
+            "_weight_size": 330.284623,
             "_docs": """
                 These weights were trained from scratch by using a modified version of
                 `DeIT `_'s training recipe.
@@ -387,6 +389,8 @@ class ViT_B_16_Weights(WeightsEnum):
                     "acc@5": 97.650,
                 }
             },
+            "_flops": 55.484350,
+            "_weight_size": 331.397904,
             "_docs": """
                 These weights are learnt via transfer learning by end-to-end fine-tuning the original
                 `SWAG `_ weights on ImageNet-1K data.
@@ -412,6 +416,8 @@ class ViT_B_16_Weights(WeightsEnum):
                     "acc@5": 96.180,
                 }
             },
+            "_flops": 17.563828,
+            "_weight_size": 330.284623,
             "_docs": """
                 These weights are composed of the original frozen `SWAG `_ trunk weights and a linear
                 classifier learnt on top of them trained on ImageNet-1K data.
@@ -436,6 +442,8 @@ class ViT_B_32_Weights(WeightsEnum):
                     "acc@5": 92.466,
                 }
             },
+            "_flops": 4.409186,
+            "_weight_size": 336.603959,
             "_docs": """
                 These weights were trained from scratch by using a modified version of
                 `DeIT `_'s training recipe.
@@ -460,6 +468,8 @@ class ViT_L_16_Weights(WeightsEnum):
                     "acc@5": 94.638,
                 }
             },
+            "_flops": 61.554713,
+            "_weight_size": 1161.023240,
             "_docs": """
                 These weights were trained from scratch by using a modified version of TorchVision's
                 `new training recipe
@@ -485,6 +495,8 @@ class ViT_L_16_Weights(WeightsEnum):
                     "acc@5": 98.512,
                 }
             },
+            "_flops": 361.986286,
+            "_weight_size": 1164.257615,
             "_docs": """
                 These weights are learnt via transfer learning by end-to-end fine-tuning the original
                 `SWAG `_ weights on ImageNet-1K data.
@@ -510,6 +522,8 @@ class ViT_L_16_Weights(WeightsEnum):
                     "acc@5": 97.422,
                 }
             },
+            "_flops": 61.554713,
+            "_weight_size": 1161.023240,
             "_docs": """
                 These weights are composed of the original frozen `SWAG `_ trunk weights and a linear
                 classifier learnt on top of them trained on ImageNet-1K data.
@@ -534,6 +548,8 @@ class ViT_L_32_Weights(WeightsEnum):
                     "acc@5": 93.07,
                 }
             },
+            "_flops": 15.377539,
+            "_weight_size": 1169.448960,
             "_docs": """
                 These weights were trained from scratch by using a modified version of
                 `DeIT `_'s training recipe.
@@ -562,6 +578,8 @@ class ViT_H_14_Weights(WeightsEnum):
                     "acc@5": 98.694,
                 }
             },
+            "_flops": 1016.716764,
+            "_weight_size": 2416.643174,
             "_docs": """
                 These weights are learnt via transfer learning by end-to-end fine-tuning the original
                 `SWAG `_ weights on ImageNet-1K data.
@@ -587,6 +605,8 @@ class ViT_H_14_Weights(WeightsEnum):
                     "acc@5": 97.730,
                 }
             },
+            "_flops": 167.295109,
+            "_weight_size": 2411.208604,
             "_docs": """
                 These weights are composed of the original frozen `SWAG `_ trunk weights and a linear
                 classifier learnt on top of them trained on ImageNet-1K data.
From f7164ec93a9620d10ea47e75c7a3fe46a3d2e9f5 Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Thu, 10 Nov 2022 11:42:57 +0000 Subject: [PATCH 02/15] Adding weight size to quantization models --- test/test_extended_models.py | 4 ++-- torchvision/models/quantization/googlenet.py | 1 + torchvision/models/quantization/inception.py | 1 + torchvision/models/quantization/mobilenetv2.py | 1 + torchvision/models/quantization/mobilenetv3.py | 1 + torchvision/models/quantization/resnet.py | 6 ++++++ torchvision/models/quantization/shufflenetv2.py | 4 ++++ 7 files changed, 16 insertions(+), 2 deletions(-) diff --git a/test/test_extended_models.py b/test/test_extended_models.py index 6948989c9e8..df116e13a98 100644 --- a/test/test_extended_models.py +++ b/test/test_extended_models.py @@ -160,9 +160,9 @@ def test_schema_meta_validation(model_fn): } # mandatory fields for each computer vision task classification_fields = {"categories", ("_metrics", "ImageNet-1K", "acc@1"), ("_metrics", "ImageNet-1K", "acc@5")} - rich_metadata = {"_flops", "_weight_size"} + rich_metadata = {"_flops"} defaults = { - "all": {"_metrics", "min_size", "num_params", "recipe", "_docs"}, + "all": {"_metrics", "min_size", "num_params", "recipe", "_docs", "_weight_size"}, "models": classification_fields | rich_metadata, "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")} | rich_metadata, "quantization": classification_fields | {"backend", "unquantized"}, diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index abf2184acec..8c3e72fdc25 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -123,6 +123,7 @@ class GoogLeNet_QuantizedWeights(WeightsEnum): "acc@5": 89.404, } }, + "_weight_size": 12.617729, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized weights listed below. diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py index 34cd2a0a36a..a8baac65f55 100644 --- a/torchvision/models/quantization/inception.py +++ b/torchvision/models/quantization/inception.py @@ -183,6 +183,7 @@ class Inception_V3_QuantizedWeights(WeightsEnum): "acc@5": 93.354, } }, + "_weight_size": 23.145652, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized weights listed below. diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py index 1f91967f146..0b4211f08e2 100644 --- a/torchvision/models/quantization/mobilenetv2.py +++ b/torchvision/models/quantization/mobilenetv2.py @@ -80,6 +80,7 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum): "acc@5": 90.150, } }, + "_weight_size": 3.422719, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized weights listed below. diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py index 53229c09534..220d1b8e25d 100644 --- a/torchvision/models/quantization/mobilenetv3.py +++ b/torchvision/models/quantization/mobilenetv3.py @@ -175,6 +175,7 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum): "acc@5": 90.858, } }, + "_weight_size": 21.553815, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized weights listed below. 
diff --git a/torchvision/models/quantization/resnet.py b/torchvision/models/quantization/resnet.py index 286c040b006..8074e8e327c 100644 --- a/torchvision/models/quantization/resnet.py +++ b/torchvision/models/quantization/resnet.py @@ -175,6 +175,7 @@ class ResNet18_QuantizedWeights(WeightsEnum): "acc@5": 88.882, } }, + "_weight_size": 11.238080, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -194,6 +195,7 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 92.814, } }, + "_weight_size": 24.758719, }, ) IMAGENET1K_FBGEMM_V2 = Weights( @@ -209,6 +211,7 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 94.976, } }, + "_weight_size": 24.953237, }, ) DEFAULT = IMAGENET1K_FBGEMM_V2 @@ -228,6 +231,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 94.480, } }, + "_weight_size": 86.034273, }, ) IMAGENET1K_FBGEMM_V2 = Weights( @@ -243,6 +247,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 96.132, } }, + "_weight_size": 86.645276, }, ) DEFAULT = IMAGENET1K_FBGEMM_V2 @@ -263,6 +268,7 @@ class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum): "acc@5": 96.326, } }, + "_weight_size": 81.556409, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py index a6317e28b23..54c58f58a70 100644 --- a/torchvision/models/quantization/shufflenetv2.py +++ b/torchvision/models/quantization/shufflenetv2.py @@ -139,6 +139,7 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum): "acc@5": 79.780, } }, + "_weight_size": 1.500871, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -158,6 +159,7 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum): "acc@5": 87.582, } }, + "_weight_size": 2.333858, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -178,6 +180,7 @@ class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum): "acc@5": 90.700, } }, + "_weight_size": 3.671769, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -198,6 +201,7 @@ class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum): "acc@5": 92.488, } }, + "_weight_size": 7.467118, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 From a5daa03474f816dcca84ca7df775adb91d8f7908 Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Thu, 10 Nov 2022 11:44:11 +0000 Subject: [PATCH 03/15] Small refactor of rich metadata --- docs/source/conf.py | 70 ++++++++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 29 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 2c258edf89d..799b1de5e16 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -24,7 +24,9 @@ import sys import textwrap from copy import copy +from dataclasses import dataclass from pathlib import Path +from typing import Callable import pytorch_sphinx_theme import torchvision @@ -375,9 +377,20 @@ def inject_weight_metadata(app, what, name, obj, options, lines): lines.append("") -def generate_weights_table( - module, table_name, metrics, dataset, include_patterns=None, exclude_patterns=None, exclude_columns=[] -): +@dataclass +class Column: + name: str + width: int + generator: Callable + + def get_bold_name(self): + return f"**{self.name}**" + + def create_entry(self, w): + return self.generator(w) + + +def generate_weights_table(module, table_name, metrics, dataset, include_patterns=None, exclude_patterns=None): weights_endswith = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights" weight_enums = [getattr(module, name) for name in dir(module) if name.endswith(weights_endswith)] weights = [w for weight_enum in weight_enums for w in 
weight_enum] @@ -387,34 +400,34 @@ def generate_weights_table( if exclude_patterns is not None: weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)] - rich_metadata = ["GFLOPs", "Size (MB)"] + rich_metadata = ["Size (MB)"] + if "_flops" in weights[0].meta: # assumes same rich meta for all models in module + rich_metadata = ["GFLOPs"] + rich_metadata metrics_keys, metrics_names = zip(*metrics) column_names = ["Weight"] + list(metrics_names) + ["Params"] + rich_metadata + ["Recipe"] # Final column order - column_names_table = [f"**{name}**" for name in column_names if name not in exclude_columns] # Add bold - - column_patterns = list( - zip( - column_names, - [lambda w: f":class:`{w} <{type(w).__name__}>`"] - + [lambda w, metric=m: w.meta["_metrics"][dataset][metric] for m in metrics_keys] - + [ - lambda w: f"{w.meta['num_params']/1e6:.1f}M", - lambda w: f"{w.meta['_flops']:.3f}", - lambda w: f"{round(w.meta['_weight_size'], 1):.1f}", - lambda w: f"`link <{w.meta['recipe']}>`__", - ], - ) - ) - - content = [ - [pattern(w) for col_name, pattern in column_patterns if col_name not in exclude_columns] for w in weights - ] - - column_widths = zip(column_names, ["120"] + ["18"] * len(metrics_names) + ["18", "18", "18", "10"]) - widths_table = " ".join(width for col_name, width in column_widths if col_name not in exclude_columns) - - table = tabulate(content, headers=column_names_table, tablefmt="rst") + column_names = [f"**{name}**" for name in column_names] # Add bold + + content = [] + for w in weights: + row = [ + f":class:`{w} <{type(w).__name__}>`", + *(w.meta["_metrics"][dataset][metric] for metric in metrics_keys), + f"{w.meta['num_params']/1e6:.1f}M", + ] + + if "_flops" in w.meta: + row.append(f"{w.meta['_flops']:.3f}") + + row.append(f"{round(w.meta['_weight_size'], 1):.1f}") + row.append(f"`link <{w.meta['recipe']}>`__") + + content.append(row) + + column_widths = ["110"] + ["18"] * len(metrics_names) + ["18"] + ["18"] * len(rich_metadata) + ["10"] + widths_table = " ".join(column_widths) + + table = tabulate(content, headers=column_names, tablefmt="rst") generated_dir = Path("generated") generated_dir.mkdir(exist_ok=True) @@ -433,7 +446,6 @@ def generate_weights_table( table_name="classification_quant", metrics=[("acc@1", "Acc@1"), ("acc@5", "Acc@5")], dataset="ImageNet-1K", - exclude_columns=["GFLOPs", "Size (MB)"], ) generate_weights_table( module=M.detection, From dbf330849ffe07ebd62f46db531f4e0b52ed95ee Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Thu, 10 Nov 2022 11:49:12 +0000 Subject: [PATCH 04/15] Removing unused code --- docs/source/conf.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 799b1de5e16..4dcb0cb61cd 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -24,9 +24,7 @@ import sys import textwrap from copy import copy -from dataclasses import dataclass from pathlib import Path -from typing import Callable import pytorch_sphinx_theme import torchvision @@ -377,19 +375,6 @@ def inject_weight_metadata(app, what, name, obj, options, lines): lines.append("") -@dataclass -class Column: - name: str - width: int - generator: Callable - - def get_bold_name(self): - return f"**{self.name}**" - - def create_entry(self, w): - return self.generator(w) - - def generate_weights_table(module, table_name, metrics, dataset, include_patterns=None, exclude_patterns=None): weights_endswith = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights" 
weight_enums = [getattr(module, name) for name in dir(module) if name.endswith(weights_endswith)] From 367fee9a66a908d8d5b20f1b63074c59a7c9103f Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Thu, 10 Nov 2022 16:42:45 +0000 Subject: [PATCH 05/15] Fixing wrong entries --- torchvision/models/regnet.py | 2 +- torchvision/models/resnet.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py index 77e770cc04e..791656d39b4 100644 --- a/torchvision/models/regnet.py +++ b/torchvision/models/regnet.py @@ -894,7 +894,7 @@ class RegNet_X_400MF_Weights(WeightsEnum): } }, "_flops": 0.413813, - "_weight_size": 21.258035, + "_weight_size": 21.256570, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 11291240035..9a580baf31b 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -386,7 +386,7 @@ class ResNet50_Weights(WeightsEnum): } }, "_flops": 4.089184, - "_weight_size": 97.780545, + "_weight_size": 97.790162, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. @@ -558,7 +558,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): } }, "_flops": 16.414015, - "_weight_size": 339.586349, + "_weight_size": 339.673062, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. From 5bc9601a9417d72ad922db5d4e86d25ceaa3f47f Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 11:20:57 +0000 Subject: [PATCH 06/15] Adding .DS_Store to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f16b54061e0..d39404b1fdf 100644 --- a/.gitignore +++ b/.gitignore @@ -35,6 +35,7 @@ gen.yml *.orig *-checkpoint.ipynb *.venv +*.DS_Store ## Xcode User settings xcuserdata/ From f21ceae80c53dfda962a497be675862edc6cab0d Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 12:12:44 +0000 Subject: [PATCH 07/15] Renaming _flops to _ops --- test/test_extended_models.py | 17 ++--- torchvision/models/alexnet.py | 2 +- torchvision/models/convnext.py | 8 +-- torchvision/models/densenet.py | 8 +-- torchvision/models/detection/faster_rcnn.py | 8 +-- torchvision/models/detection/fcos.py | 2 +- torchvision/models/detection/keypoint_rcnn.py | 4 +- torchvision/models/detection/mask_rcnn.py | 4 +- torchvision/models/detection/retinanet.py | 4 +- torchvision/models/detection/ssd.py | 2 +- torchvision/models/detection/ssdlite.py | 2 +- torchvision/models/efficientnet.py | 24 +++---- torchvision/models/googlenet.py | 2 +- torchvision/models/inception.py | 2 +- torchvision/models/maxvit.py | 2 +- torchvision/models/mnasnet.py | 8 +-- torchvision/models/mobilenetv2.py | 4 +- torchvision/models/mobilenetv3.py | 6 +- torchvision/models/optical_flow/raft.py | 16 ++--- torchvision/models/regnet.py | 68 +++++++++---------- torchvision/models/resnet.py | 34 +++++----- torchvision/models/segmentation/deeplabv3.py | 6 +- torchvision/models/segmentation/fcn.py | 4 +- torchvision/models/segmentation/lraspp.py | 2 +- torchvision/models/shufflenetv2.py | 8 +-- torchvision/models/squeezenet.py | 4 +- torchvision/models/swin_transformer.py | 12 ++-- torchvision/models/vgg.py | 18 ++--- torchvision/models/video/mvit.py | 4 +- torchvision/models/video/resnet.py | 6 +- 
torchvision/models/video/s3d.py | 2 +- torchvision/models/vision_transformer.py | 20 +++--- 32 files changed, 155 insertions(+), 158 deletions(-) diff --git a/test/test_extended_models.py b/test/test_extended_models.py index df116e13a98..c3bb5d65396 100644 --- a/test/test_extended_models.py +++ b/test/test_extended_models.py @@ -155,26 +155,23 @@ def test_schema_meta_validation(model_fn): "recipe", "unquantized", "_docs", - "_flops", + "_ops", "_weight_size", } # mandatory fields for each computer vision task classification_fields = {"categories", ("_metrics", "ImageNet-1K", "acc@1"), ("_metrics", "ImageNet-1K", "acc@5")} - rich_metadata = {"_flops"} defaults = { - "all": {"_metrics", "min_size", "num_params", "recipe", "_docs", "_weight_size"}, - "models": classification_fields | rich_metadata, - "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")} | rich_metadata, + "all": {"_metrics", "min_size", "num_params", "recipe", "_docs", "_weight_size", "_ops"}, + "models": classification_fields, + "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")}, "quantization": classification_fields | {"backend", "unquantized"}, "segmentation": { "categories", ("_metrics", "COCO-val2017-VOC-labels", "miou"), ("_metrics", "COCO-val2017-VOC-labels", "pixel_acc"), - } - | rich_metadata, - "video": {"categories", ("_metrics", "Kinetics-400", "acc@1"), ("_metrics", "Kinetics-400", "acc@5")} - | rich_metadata, - "optical_flow": rich_metadata, + }, + "video": {"categories", ("_metrics", "Kinetics-400", "acc@1"), ("_metrics", "Kinetics-400", "acc@5")}, + "optical_flow": set(), } model_name = model_fn.__name__ module_name = model_fn.__module__.split(".")[-2] diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py index cc8fbb6c761..9d36fb2b7ee 100644 --- a/torchvision/models/alexnet.py +++ b/torchvision/models/alexnet.py @@ -67,7 +67,7 @@ class AlexNet_Weights(WeightsEnum): "acc@5": 79.066, } }, - "_flops": 0.714188, + "_ops": 0.714188, "_weight_size": 233.086501, "_docs": """ These weights reproduce closely the results of the paper using a simplified training recipe. 
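With the rename applied, every weights enum exposes its operation count under the private "_ops" meta key, next to "_weight_size". A minimal sketch of reading the new fields once this series is applied (the model chosen below is illustrative only, not part of this patch):

    from torchvision.models import AlexNet_Weights

    w = AlexNet_Weights.IMAGENET1K_V1
    # "_ops" holds GFLOPs for float models (GOPs for quantized ones);
    # "_weight_size" holds the serialized checkpoint size in MB.
    print(f"{w.meta['_ops']:.3f} GFLOPs, {w.meta['_weight_size']:.1f} MB")

Following the convention of "_metrics" and "_docs", both keys are underscore-prefixed and treated as private metadata, surfaced through the generated documentation tables rather than as a stable public API.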
diff --git a/torchvision/models/convnext.py b/torchvision/models/convnext.py index c3a1fb81c7a..eae447d4f81 100644 --- a/torchvision/models/convnext.py +++ b/torchvision/models/convnext.py @@ -219,7 +219,7 @@ class ConvNeXt_Tiny_Weights(WeightsEnum): "acc@5": 96.146, } }, - "_flops": 4.455531, + "_ops": 4.455531, "_weight_size": 109.118672, }, ) @@ -239,7 +239,7 @@ class ConvNeXt_Small_Weights(WeightsEnum): "acc@5": 96.650, } }, - "_flops": 8.683712, + "_ops": 8.683712, "_weight_size": 191.702775, }, ) @@ -259,7 +259,7 @@ class ConvNeXt_Base_Weights(WeightsEnum): "acc@5": 96.870, } }, - "_flops": 15.354729, + "_ops": 15.354729, "_weight_size": 338.064286, }, ) @@ -279,7 +279,7 @@ class ConvNeXt_Large_Weights(WeightsEnum): "acc@5": 96.976, } }, - "_flops": 34.361434, + "_ops": 34.361434, "_weight_size": 754.537187, }, ) diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py index cef35b9b47d..2288bc4ffa1 100644 --- a/torchvision/models/densenet.py +++ b/torchvision/models/densenet.py @@ -277,7 +277,7 @@ class DenseNet121_Weights(WeightsEnum): "acc@5": 91.972, } }, - "_flops": 2.834162, + "_ops": 2.834162, "_weight_size": 30.844645, }, ) @@ -297,7 +297,7 @@ class DenseNet161_Weights(WeightsEnum): "acc@5": 93.560, } }, - "_flops": 7.727907, + "_ops": 7.727907, "_weight_size": 110.369482, }, ) @@ -317,7 +317,7 @@ class DenseNet169_Weights(WeightsEnum): "acc@5": 92.806, } }, - "_flops": 3.359843, + "_ops": 3.359843, "_weight_size": 54.708029, }, ) @@ -337,7 +337,7 @@ class DenseNet201_Weights(WeightsEnum): "acc@5": 93.370, } }, - "_flops": 4.291366, + "_ops": 4.291366, "_weight_size": 77.373247, }, ) diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index 6cac8d5f589..8faff726850 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -388,7 +388,7 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum): "box_map": 37.0, } }, - "_flops": 134.379721, + "_ops": 134.379721, "_weight_size": 159.743153, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -409,7 +409,7 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): "box_map": 46.7, } }, - "_flops": 280.370729, + "_ops": 280.370729, "_weight_size": 167.104394, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, @@ -430,7 +430,7 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum): "box_map": 32.8, } }, - "_flops": 4.493592, + "_ops": 4.493592, "_weight_size": 74.238593, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -451,7 +451,7 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum): "box_map": 22.8, } }, - "_flops": 0.718998, + "_ops": 0.718998, "_weight_size": 74.238593, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 9bc78bba81e..3b862c2aada 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -662,7 +662,7 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum): "box_map": 39.2, } }, - "_flops": 128.207053, + "_ops": 128.207053, "_weight_size": 123.607730, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/keypoint_rcnn.py 
b/torchvision/models/detection/keypoint_rcnn.py index 0a565807bd5..5a635265a3e 100644 --- a/torchvision/models/detection/keypoint_rcnn.py +++ b/torchvision/models/detection/keypoint_rcnn.py @@ -328,7 +328,7 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "kp_map": 61.1, } }, - "_flops": 133.924041, + "_ops": 133.924041, "_weight_size": 226.053994, "_docs": """ These weights were produced by following a similar training recipe as on the paper but use a checkpoint @@ -349,7 +349,7 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "kp_map": 65.0, } }, - "_flops": 137.419502, + "_ops": 137.419502, "_weight_size": 226.053994, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/mask_rcnn.py b/torchvision/models/detection/mask_rcnn.py index 511f0137db4..f8f3c878ed1 100644 --- a/torchvision/models/detection/mask_rcnn.py +++ b/torchvision/models/detection/mask_rcnn.py @@ -370,7 +370,7 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum): "mask_map": 34.6, } }, - "_flops": 134.379721, + "_ops": 134.379721, "_weight_size": 169.839934, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -392,7 +392,7 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): "mask_map": 41.8, } }, - "_flops": 333.577360, + "_ops": 333.577360, "_weight_size": 177.219453, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 4f08d677ddc..b710e9b9c1f 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -690,7 +690,7 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum): "box_map": 36.4, } }, - "_flops": 151.540437, + "_ops": 151.540437, "_weight_size": 130.267216, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -711,7 +711,7 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum): "box_map": 41.5, } }, - "_flops": 152.238199, + "_ops": 152.238199, "_weight_size": 146.037091, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 19d3d9fb6ce..8cd82cdfc71 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -39,7 +39,7 @@ class SSD300_VGG16_Weights(WeightsEnum): "box_map": 25.1, } }, - "_flops": 34.858153, + "_ops": 34.858153, "_weight_size": 135.988447, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py index 4a4cd833574..99c8fc97f03 100644 --- a/torchvision/models/detection/ssdlite.py +++ b/torchvision/models/detection/ssdlite.py @@ -198,7 +198,7 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum): "box_map": 21.3, } }, - "_flops": 0.583172, + "_ops": 0.583172, "_weight_size": 13.417583, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py index 1fa7a9907e2..14f247ac32a 100644 --- a/torchvision/models/efficientnet.py +++ b/torchvision/models/efficientnet.py @@ -464,7 +464,7 @@ class EfficientNet_B0_Weights(WeightsEnum): "acc@5": 
93.532, } }, - "_flops": 0.385815, + "_ops": 0.385815, "_weight_size": 20.450974, "_docs": """These weights are ported from the original paper.""", }, @@ -488,7 +488,7 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@5": 94.186, } }, - "_flops": 0.686803, + "_ops": 0.686803, "_weight_size": 30.133798, "_docs": """These weights are ported from the original paper.""", }, @@ -508,7 +508,7 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@5": 94.934, } }, - "_flops": 0.686803, + "_ops": 0.686803, "_weight_size": 30.136422, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -536,7 +536,7 @@ class EfficientNet_B2_Weights(WeightsEnum): "acc@5": 95.310, } }, - "_flops": 1.087529, + "_ops": 1.087529, "_weight_size": 35.173593, "_docs": """These weights are ported from the original paper.""", }, @@ -560,7 +560,7 @@ class EfficientNet_B3_Weights(WeightsEnum): "acc@5": 96.054, } }, - "_flops": 1.827141, + "_ops": 1.827141, "_weight_size": 47.183904, "_docs": """These weights are ported from the original paper.""", }, @@ -584,7 +584,7 @@ class EfficientNet_B4_Weights(WeightsEnum): "acc@5": 96.594, } }, - "_flops": 4.393771, + "_ops": 4.393771, "_weight_size": 74.489011, "_docs": """These weights are ported from the original paper.""", }, @@ -608,7 +608,7 @@ class EfficientNet_B5_Weights(WeightsEnum): "acc@5": 96.628, } }, - "_flops": 10.266385, + "_ops": 10.266385, "_weight_size": 116.863912, "_docs": """These weights are ported from the original paper.""", }, @@ -632,7 +632,7 @@ class EfficientNet_B6_Weights(WeightsEnum): "acc@5": 96.916, } }, - "_flops": 19.067594, + "_ops": 19.067594, "_weight_size": 165.361524, "_docs": """These weights are ported from the original paper.""", }, @@ -656,7 +656,7 @@ class EfficientNet_B7_Weights(WeightsEnum): "acc@5": 96.908, } }, - "_flops": 37.745884, + "_ops": 37.745884, "_weight_size": 254.675393, "_docs": """These weights are ported from the original paper.""", }, @@ -682,7 +682,7 @@ class EfficientNet_V2_S_Weights(WeightsEnum): "acc@5": 96.878, } }, - "_flops": 8.365617, + "_ops": 8.365617, "_weight_size": 82.703832, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -712,7 +712,7 @@ class EfficientNet_V2_M_Weights(WeightsEnum): "acc@5": 97.156, } }, - "_flops": 24.582460, + "_ops": 24.582460, "_weight_size": 208.010186, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -745,7 +745,7 @@ class EfficientNet_V2_L_Weights(WeightsEnum): "acc@5": 97.788, } }, - "_flops": 56.079699, + "_ops": 56.079699, "_weight_size": 454.573071, "_docs": """These weights are ported from the original paper.""", }, diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index 3172bf52b9d..123ee930ce5 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -290,7 +290,7 @@ class GoogLeNet_Weights(WeightsEnum): "acc@5": 89.530, } }, - "_flops": 1.498376, + "_ops": 1.498376, "_weight_size": 49.731288, "_docs": """These weights are ported from the original paper.""", }, diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py index f73e9965128..433a9e2dd25 100644 --- a/torchvision/models/inception.py +++ b/torchvision/models/inception.py @@ -422,7 +422,7 @@ class Inception_V3_Weights(WeightsEnum): "acc@5": 93.450, } }, - "_flops": 5.713216, + "_ops": 5.713216, "_weight_size": 103.902575, "_docs": 
"""These weights are ported from the original paper.""", }, diff --git a/torchvision/models/maxvit.py b/torchvision/models/maxvit.py index 32cd643187c..c3be63446ee 100644 --- a/torchvision/models/maxvit.py +++ b/torchvision/models/maxvit.py @@ -785,7 +785,7 @@ class MaxVit_T_Weights(WeightsEnum): "acc@5": 96.722, } }, - "_flops": 5.558047, + "_ops": 5.558047, "_weight_size": 118.769322, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py index 46e99ec64b7..31812d941c0 100644 --- a/torchvision/models/mnasnet.py +++ b/torchvision/models/mnasnet.py @@ -231,7 +231,7 @@ class MNASNet0_5_Weights(WeightsEnum): "acc@5": 87.490, } }, - "_flops": 0.104456, + "_ops": 0.104456, "_weight_size": 8.591165, "_docs": """These weights reproduce closely the results of the paper.""", }, @@ -253,7 +253,7 @@ class MNASNet0_75_Weights(WeightsEnum): "acc@5": 90.496, } }, - "_flops": 0.215493, + "_ops": 0.215493, "_weight_size": 12.302564, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe @@ -277,7 +277,7 @@ class MNASNet1_0_Weights(WeightsEnum): "acc@5": 91.510, } }, - "_flops": 0.314416, + "_ops": 0.314416, "_weight_size": 16.915318, "_docs": """These weights reproduce closely the results of the paper.""", }, @@ -299,7 +299,7 @@ class MNASNet1_3_Weights(WeightsEnum): "acc@5": 93.522, } }, - "_flops": 0.526362, + "_ops": 0.526362, "_weight_size": 24.245618, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index d87f771c640..bcb8df8b93b 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -194,7 +194,7 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@5": 90.286, } }, - "_flops": 0.300774, + "_ops": 0.300774, "_weight_size": 13.554546, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -211,7 +211,7 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@5": 90.822, } }, - "_flops": 0.300774, + "_ops": 0.300774, "_weight_size": 13.598035, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py index 1e8814b85eb..14f87997ab5 100644 --- a/torchvision/models/mobilenetv3.py +++ b/torchvision/models/mobilenetv3.py @@ -307,7 +307,7 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@5": 91.340, } }, - "_flops": 0.216590, + "_ops": 0.216590, "_weight_size": 21.113799, "_docs": """These weights were trained from scratch by using a simple training recipe.""", }, @@ -325,7 +325,7 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@5": 92.566, } }, - "_flops": 0.216590, + "_ops": 0.216590, "_weight_size": 21.106828, "_docs": """ These weights improve marginally upon the results of the original paper by using a modified version of @@ -351,7 +351,7 @@ class MobileNet_V3_Small_Weights(WeightsEnum): "acc@5": 87.402, } }, - "_flops": 0.056510, + "_ops": 0.056510, "_weight_size": 9.829093, "_docs": """ These weights improve upon the results of the original paper by using a simple training recipe. 
diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py index d063bf55650..e83b727a8fd 100644 --- a/torchvision/models/optical_flow/raft.py +++ b/torchvision/models/optical_flow/raft.py @@ -552,7 +552,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 2.7894}, "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506}, }, - "_flops": 211.007046, + "_ops": 211.007046, "_weight_size": 20.128829, "_docs": """These weights were ported from the original paper. They are trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -572,7 +572,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 2.7161}, "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679}, }, - "_flops": 211.007046, + "_ops": 211.007046, "_weight_size": 20.128829, "_docs": """These weights were trained from scratch on :class:`~torchvision.datasets.FlyingChairs` + @@ -592,7 +592,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Test-Cleanpass": {"epe": 1.94}, "Sintel-Test-Finalpass": {"epe": 3.18}, }, - "_flops": 211.007046, + "_ops": 211.007046, "_weight_size": 20.128829, "_docs": """ These weights were ported from the original paper. They are @@ -618,7 +618,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Test-Cleanpass": {"epe": 1.819}, "Sintel-Test-Finalpass": {"epe": 3.067}, }, - "_flops": 211.007046, + "_ops": 211.007046, "_weight_size": 20.128829, "_docs": """ These weights were trained from scratch. They are @@ -644,7 +644,7 @@ class Raft_Large_Weights(WeightsEnum): "_metrics": { "Kitti-Test": {"fl_all": 5.10}, }, - "_flops": 211.007046, + "_ops": 211.007046, "_weight_size": 20.128829, "_docs": """ These weights were ported from the original paper. They are @@ -667,7 +667,7 @@ class Raft_Large_Weights(WeightsEnum): "_metrics": { "Kitti-Test": {"fl_all": 5.19}, }, - "_flops": 211.007046, + "_ops": 211.007046, "_weight_size": 20.128829, "_docs": """ These weights were trained from scratch. They are @@ -710,7 +710,7 @@ class Raft_Small_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 3.2790}, "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801}, }, - "_flops": 47.655158, + "_ops": 47.655158, "_weight_size": 3.820600, "_docs": """These weights were ported from the original paper. 
They are trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -729,7 +729,7 @@ class Raft_Small_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 3.2831}, "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369}, }, - "_flops": 47.655158, + "_ops": 47.655158, "_weight_size": 3.820600, "_docs": """These weights were trained from scratch on :class:`~torchvision.datasets.FlyingChairs` + diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py index 791656d39b4..3db784f8042 100644 --- a/torchvision/models/regnet.py +++ b/torchvision/models/regnet.py @@ -428,7 +428,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@5": 91.716, } }, - "_flops": 0.401843, + "_ops": 0.401843, "_weight_size": 16.805909, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -446,7 +446,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@5": 92.742, } }, - "_flops": 0.401843, + "_ops": 0.401843, "_weight_size": 16.805909, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -472,7 +472,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@5": 93.136, } }, - "_flops": 0.833856, + "_ops": 0.833856, "_weight_size": 24.773644, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -490,7 +490,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@5": 94.502, } }, - "_flops": 0.833856, + "_ops": 0.833856, "_weight_size": 24.773522, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -516,7 +516,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@5": 93.966, } }, - "_flops": 1.612076, + "_ops": 1.612076, "_weight_size": 43.152310, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -534,7 +534,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@5": 95.444, } }, - "_flops": 1.612076, + "_ops": 1.612076, "_weight_size": 43.152310, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -560,7 +560,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@5": 94.576, } }, - "_flops": 3.176483, + "_ops": 3.176483, "_weight_size": 74.566991, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -578,7 +578,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@5": 95.972, } }, - "_flops": 3.176483, + "_ops": 3.176483, "_weight_size": 74.566991, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -604,7 +604,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@5": 95.048, } }, - "_flops": 8.473071, + "_ops": 8.473071, "_weight_size": 150.701436, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -622,7 +622,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@5": 96.330, } }, - "_flops": 8.473071, + "_ops": 8.473071, "_weight_size": 150.701436, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -648,7 +648,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 95.240, } }, - "_flops": 15.911510, + "_ops": 15.911510, "_weight_size": 319.490335, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -666,7 +666,7 @@ class 
RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 96.328, } }, - "_flops": 15.911510, + "_ops": 15.911510, "_weight_size": 319.490335, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -689,7 +689,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 98.054, } }, - "_flops": 46.734897, + "_ops": 46.734897, "_weight_size": 319.490335, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -712,7 +712,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 97.244, } }, - "_flops": 15.911510, + "_ops": 15.911510, "_weight_size": 319.490335, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -737,7 +737,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 95.340, } }, - "_flops": 32.279553, + "_ops": 32.279553, "_weight_size": 554.076371, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -755,7 +755,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 96.498, } }, - "_flops": 32.279553, + "_ops": 32.279553, "_weight_size": 554.076371, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -778,7 +778,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 98.362, } }, - "_flops": 94.826458, + "_ops": 94.826458, "_weight_size": 554.076371, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -801,7 +801,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 97.480, } }, - "_flops": 32.279553, + "_ops": 32.279553, "_weight_size": 554.076371, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -827,7 +827,7 @@ class RegNet_Y_128GF_Weights(WeightsEnum): "acc@5": 98.682, } }, - "_flops": 374.570006, + "_ops": 374.570006, "_weight_size": 2461.563993, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -850,7 +850,7 @@ class RegNet_Y_128GF_Weights(WeightsEnum): "acc@5": 97.844, } }, - "_flops": 127.517816, + "_ops": 127.517816, "_weight_size": 2461.563993, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -875,7 +875,7 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@5": 90.950, } }, - "_flops": 0.413813, + "_ops": 0.413813, "_weight_size": 21.258035, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -893,7 +893,7 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@5": 92.322, } }, - "_flops": 0.413813, + "_ops": 0.413813, "_weight_size": 21.256570, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -919,7 +919,7 @@ class RegNet_X_800MF_Weights(WeightsEnum): "acc@5": 92.348, } }, - "_flops": 0.799700, + "_ops": 0.799700, "_weight_size": 27.945130, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -937,7 +937,7 @@ class RegNet_X_800MF_Weights(WeightsEnum): "acc@5": 93.826, } }, - "_flops": 0.799700, + "_ops": 0.799700, "_weight_size": 27.945130, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -963,7 +963,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@5": 93.440, } }, - "_flops": 1.602850, + "_ops": 1.602850, "_weight_size": 35.339471, "_docs": """These weights reproduce closely the results of the 
paper using a simple training recipe.""", }, @@ -981,7 +981,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@5": 94.922, } }, - "_flops": 1.602850, + "_ops": 1.602850, "_weight_size": 35.339471, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1007,7 +1007,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@5": 93.992, } }, - "_flops": 3.176622, + "_ops": 3.176622, "_weight_size": 58.755979, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1025,7 +1025,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@5": 95.430, } }, - "_flops": 3.176622, + "_ops": 3.176622, "_weight_size": 58.755979, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1051,7 +1051,7 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@5": 94.686, } }, - "_flops": 7.995132, + "_ops": 7.995132, "_weight_size": 151.455937, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1069,7 +1069,7 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@5": 95.678, } }, - "_flops": 7.995132, + "_ops": 7.995132, "_weight_size": 151.455937, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1095,7 +1095,7 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@5": 94.944, } }, - "_flops": 15.940755, + "_ops": 15.940755, "_weight_size": 207.627419, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1113,7 +1113,7 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@5": 96.196, } }, - "_flops": 15.940755, + "_ops": 15.940755, "_weight_size": 207.627419, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1139,7 +1139,7 @@ class RegNet_X_32GF_Weights(WeightsEnum): "acc@5": 95.248, } }, - "_flops": 31.735930, + "_ops": 31.735930, "_weight_size": 412.039433, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1157,7 +1157,7 @@ class RegNet_X_32GF_Weights(WeightsEnum): "acc@5": 96.288, } }, - "_flops": 31.735930, + "_ops": 31.735930, "_weight_size": 412.039433, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 9a580baf31b..1bc97ca5355 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -323,7 +323,7 @@ class ResNet18_Weights(WeightsEnum): "acc@5": 89.078, } }, - "_flops": 1.814073, + "_ops": 1.814073, "_weight_size": 44.661113, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -345,7 +345,7 @@ class ResNet34_Weights(WeightsEnum): "acc@5": 91.420, } }, - "_flops": 3.663761, + "_ops": 3.663761, "_weight_size": 83.274669, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -367,7 +367,7 @@ class ResNet50_Weights(WeightsEnum): "acc@5": 92.862, } }, - "_flops": 4.089184, + "_ops": 4.089184, "_weight_size": 97.780545, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -385,7 +385,7 @@ class ResNet50_Weights(WeightsEnum): "acc@5": 95.434, } }, - "_flops": 4.089184, + "_ops": 4.089184, 
"_weight_size": 97.790162, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -410,7 +410,7 @@ class ResNet101_Weights(WeightsEnum): "acc@5": 93.546, } }, - "_flops": 7.801405, + "_ops": 7.801405, "_weight_size": 170.511188, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -428,7 +428,7 @@ class ResNet101_Weights(WeightsEnum): "acc@5": 95.780, } }, - "_flops": 7.801405, + "_ops": 7.801405, "_weight_size": 170.530362, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -453,7 +453,7 @@ class ResNet152_Weights(WeightsEnum): "acc@5": 94.046, } }, - "_flops": 11.513627, + "_ops": 11.513627, "_weight_size": 230.434152, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -471,7 +471,7 @@ class ResNet152_Weights(WeightsEnum): "acc@5": 96.002, } }, - "_flops": 11.513627, + "_ops": 11.513627, "_weight_size": 230.473687, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -496,7 +496,7 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@5": 93.698, } }, - "_flops": 4.230480, + "_ops": 4.230480, "_weight_size": 95.788646, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -514,7 +514,7 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@5": 95.340, } }, - "_flops": 4.230480, + "_ops": 4.230480, "_weight_size": 95.833192, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -539,7 +539,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@5": 94.526, } }, - "_flops": 16.414015, + "_ops": 16.414015, "_weight_size": 339.586349, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -557,7 +557,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@5": 96.228, } }, - "_flops": 16.414015, + "_ops": 16.414015, "_weight_size": 339.673062, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -582,7 +582,7 @@ class ResNeXt101_64X4D_Weights(WeightsEnum): "acc@5": 96.454, } }, - "_flops": 15.460270, + "_ops": 15.460270, "_weight_size": 319.317594, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe @@ -607,7 +607,7 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@5": 94.086, } }, - "_flops": 11.398021, + "_ops": 11.398021, "_weight_size": 131.820194, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -625,7 +625,7 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@5": 95.758, } }, - "_flops": 11.398021, + "_ops": 11.398021, "_weight_size": 263.124207, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -650,7 +650,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@5": 94.284, } }, - "_flops": 22.753051, + "_ops": 22.753051, "_weight_size": 242.896219, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -668,7 +668,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@5": 96.020, } }, - "_flops": 22.753051, + "_ops": 22.753051, "_weight_size": 484.747281, "_docs": """ These weights improve upon the 
results of the original paper by using TorchVision's `new training recipe diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py index dc5e5b2be55..ef4d807f461 100644 --- a/torchvision/models/segmentation/deeplabv3.py +++ b/torchvision/models/segmentation/deeplabv3.py @@ -152,7 +152,7 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum): "pixel_acc": 92.4, } }, - "_flops": 178.721945, + "_ops": 178.721945, "_weight_size": 160.514977, }, ) @@ -173,7 +173,7 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum): "pixel_acc": 92.4, } }, - "_flops": 258.743039, + "_ops": 258.743039, "_weight_size": 233.216800, }, ) @@ -194,7 +194,7 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum): "pixel_acc": 91.2, } }, - "_flops": 10.452448, + "_ops": 10.452448, "_weight_size": 42.301330, }, ) diff --git a/torchvision/models/segmentation/fcn.py b/torchvision/models/segmentation/fcn.py index 046c85b4d18..6486b3aee7e 100644 --- a/torchvision/models/segmentation/fcn.py +++ b/torchvision/models/segmentation/fcn.py @@ -71,7 +71,7 @@ class FCN_ResNet50_Weights(WeightsEnum): "pixel_acc": 91.4, } }, - "_flops": 152.716512, + "_ops": 152.716512, "_weight_size": 135.009211, }, ) @@ -92,7 +92,7 @@ class FCN_ResNet101_Weights(WeightsEnum): "pixel_acc": 91.9, } }, - "_flops": 232.737606, + "_ops": 232.737606, "_weight_size": 207.711034, }, ) diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py index c0ec738d786..d00cfa96e0e 100644 --- a/torchvision/models/segmentation/lraspp.py +++ b/torchvision/models/segmentation/lraspp.py @@ -108,7 +108,7 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum): "pixel_acc": 91.2, } }, - "_flops": 2.086220, + "_ops": 2.086220, "_weight_size": 12.490331, "_docs": """ These weights were trained on a subset of COCO, using only the 20 categories that are present in the diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py index d91353a442d..2c62fc1d6d8 100644 --- a/torchvision/models/shufflenetv2.py +++ b/torchvision/models/shufflenetv2.py @@ -204,7 +204,7 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum): "acc@5": 81.746, } }, - "_flops": 0.040476, + "_ops": 0.040476, "_weight_size": 5.281570, "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""", }, @@ -226,7 +226,7 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum): "acc@5": 88.316, } }, - "_flops": 0.144908, + "_ops": 0.144908, "_weight_size": 8.791250, "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""", }, @@ -248,7 +248,7 @@ class ShuffleNet_V2_X1_5_Weights(WeightsEnum): "acc@5": 91.086, } }, - "_flops": 0.295759, + "_ops": 0.295759, "_weight_size": 13.557034, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe @@ -273,7 +273,7 @@ class ShuffleNet_V2_X2_0_Weights(WeightsEnum): "acc@5": 93.006, } }, - "_flops": 0.583253, + "_ops": 0.583253, "_weight_size": 28.432767, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index ca6c144f35d..8b7609da655 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -135,7 +135,7 @@ class SqueezeNet1_0_Weights(WeightsEnum): "acc@5": 80.420, } }, - "_flops": 0.818925, + "_ops": 0.818925, "_weight_size": 4.778434, }, ) @@ -156,7 +156,7 @@ class SqueezeNet1_1_Weights(WeightsEnum): 
"acc@5": 80.624, } }, - "_flops": 0.349152, + "_ops": 0.349152, "_weight_size": 4.729117, }, ) diff --git a/torchvision/models/swin_transformer.py b/torchvision/models/swin_transformer.py index c5d7d8d04f8..c18cff6e19a 100644 --- a/torchvision/models/swin_transformer.py +++ b/torchvision/models/swin_transformer.py @@ -660,7 +660,7 @@ class Swin_T_Weights(WeightsEnum): "acc@5": 95.776, } }, - "_flops": 4.490567, + "_ops": 4.490567, "_weight_size": 108.190383, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -685,7 +685,7 @@ class Swin_S_Weights(WeightsEnum): "acc@5": 96.360, } }, - "_flops": 8.740875, + "_ops": 8.740875, "_weight_size": 189.786254, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -710,7 +710,7 @@ class Swin_B_Weights(WeightsEnum): "acc@5": 96.640, } }, - "_flops": 15.430947, + "_ops": 15.430947, "_weight_size": 335.363585, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -735,7 +735,7 @@ class Swin_V2_T_Weights(WeightsEnum): "acc@5": 96.132, } }, - "_flops": 5.939690, + "_ops": 5.939690, "_weight_size": 108.625840, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -760,7 +760,7 @@ class Swin_V2_S_Weights(WeightsEnum): "acc@5": 96.816, } }, - "_flops": 11.545857, + "_ops": 11.545857, "_weight_size": 190.674577, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -785,7 +785,7 @@ class Swin_V2_B_Weights(WeightsEnum): "acc@5": 96.864, } }, - "_flops": 20.325134, + "_ops": 20.325134, "_weight_size": 336.371781, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py index 8bb5b806b45..e83970d61af 100644 --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -127,7 +127,7 @@ class VGG11_Weights(WeightsEnum): "acc@5": 88.628, } }, - "_flops": 7.609090, + "_ops": 7.609090, "_weight_size": 506.840077, }, ) @@ -147,7 +147,7 @@ class VGG11_BN_Weights(WeightsEnum): "acc@5": 89.810, } }, - "_flops": 7.609090, + "_ops": 7.609090, "_weight_size": 506.881400, }, ) @@ -167,7 +167,7 @@ class VGG13_Weights(WeightsEnum): "acc@5": 89.246, } }, - "_flops": 11.308466, + "_ops": 11.308466, "_weight_size": 507.545068, }, ) @@ -187,7 +187,7 @@ class VGG13_BN_Weights(WeightsEnum): "acc@5": 90.374, } }, - "_flops": 11.308466, + "_ops": 11.308466, "_weight_size": 507.589627, }, ) @@ -207,7 +207,7 @@ class VGG16_Weights(WeightsEnum): "acc@5": 90.382, } }, - "_flops": 15.470264, + "_ops": 15.470264, "_weight_size": 527.795678, }, ) @@ -231,7 +231,7 @@ class VGG16_Weights(WeightsEnum): "acc@5": float("nan"), } }, - "_flops": 15.470264, + "_ops": 15.470264, "_weight_size": 527.801824, "_docs": """ These weights can't be used for classification because they are missing values in the `classifier` @@ -256,7 +256,7 @@ class VGG16_BN_Weights(WeightsEnum): "acc@5": 91.516, } }, - "_flops": 15.470264, + "_ops": 15.470264, "_weight_size": 527.866207, }, ) @@ -276,7 +276,7 @@ class VGG19_Weights(WeightsEnum): "acc@5": 90.876, } }, - "_flops": 19.632062, + "_ops": 19.632062, "_weight_size": 548.051225, }, ) @@ -296,7 +296,7 @@ class VGG19_BN_Weights(WeightsEnum): "acc@5": 91.842, } }, - "_flops": 19.632062, + "_ops": 19.632062, "_weight_size": 548.142819, }, ) diff --git 
a/torchvision/models/video/mvit.py b/torchvision/models/video/mvit.py index 42be6470066..4a2db21a997 100644 --- a/torchvision/models/video/mvit.py +++ b/torchvision/models/video/mvit.py @@ -624,7 +624,7 @@ class MViT_V1_B_Weights(WeightsEnum): "acc@5": 93.582, } }, - "_flops": 70.599408, + "_ops": 70.599408, "_weight_size": 139.764235, }, ) @@ -657,7 +657,7 @@ class MViT_V2_S_Weights(WeightsEnum): "acc@5": 94.665, } }, - "_flops": 64.223816, + "_ops": 64.223816, "_weight_size": 131.883704, }, ) diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py index b0873f50bd0..022aa2b3da1 100644 --- a/torchvision/models/video/resnet.py +++ b/torchvision/models/video/resnet.py @@ -332,7 +332,7 @@ class R3D_18_Weights(WeightsEnum): "acc@5": 83.479, } }, - "_flops": 40.696553, + "_ops": 40.696553, "_weight_size": 127.359406, }, ) @@ -352,7 +352,7 @@ class MC3_18_Weights(WeightsEnum): "acc@5": 84.130, } }, - "_flops": 43.342635, + "_ops": 43.342635, "_weight_size": 44.671906, }, ) @@ -372,7 +372,7 @@ class R2Plus1D_18_Weights(WeightsEnum): "acc@5": 86.175, } }, - "_flops": 40.519081, + "_ops": 40.519081, "_weight_size": 120.318409, }, ) diff --git a/torchvision/models/video/s3d.py b/torchvision/models/video/s3d.py index 64df9eed469..b0429e2b246 100644 --- a/torchvision/models/video/s3d.py +++ b/torchvision/models/video/s3d.py @@ -175,7 +175,7 @@ class S3D_Weights(WeightsEnum): "acc@5": 88.050, } }, - "_flops": 17.978771, + "_ops": 17.978771, "_weight_size": 31.971929, }, ) diff --git a/torchvision/models/vision_transformer.py b/torchvision/models/vision_transformer.py index 6e042097098..684755ef4c3 100644 --- a/torchvision/models/vision_transformer.py +++ b/torchvision/models/vision_transformer.py @@ -363,7 +363,7 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 95.318, } }, - "_flops": 17.563828, + "_ops": 17.563828, "_weight_size": 330.284623, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT @@ -389,7 +389,7 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 97.650, } }, - "_flops": 55.484350, + "_ops": 55.484350, "_weight_size": 331.397904, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -416,7 +416,7 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 96.180, } }, - "_flops": 17.563828, + "_ops": 17.563828, "_weight_size": 330.284623, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -442,7 +442,7 @@ class ViT_B_32_Weights(WeightsEnum): "acc@5": 92.466, } }, - "_flops": 4.409186, + "_ops": 4.409186, "_weight_size": 336.603959, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT @@ -468,7 +468,7 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 94.638, } }, - "_flops": 61.554713, + "_ops": 61.554713, "_weight_size": 1161.023240, "_docs": """ These weights were trained from scratch by using a modified version of TorchVision's @@ -495,7 +495,7 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 98.512, } }, - "_flops": 361.986286, + "_ops": 361.986286, "_weight_size": 1164.257615, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -522,7 +522,7 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 97.422, } }, - "_flops": 61.554713, + "_ops": 61.554713, "_weight_size": 1161.023240, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -548,7 +548,7 @@ class ViT_L_32_Weights(WeightsEnum): "acc@5": 93.07, } }, - "_flops": 15.377539, + "_ops": 
15.377539, "_weight_size": 1169.448960, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT @@ -578,7 +578,7 @@ class ViT_H_14_Weights(WeightsEnum): "acc@5": 98.694, } }, - "_flops": 1016.716764, + "_ops": 1016.716764, "_weight_size": 2416.643174, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -605,7 +605,7 @@ class ViT_H_14_Weights(WeightsEnum): "acc@5": 97.730, } }, - "_flops": 167.295109, + "_ops": 167.295109, "_weight_size": 2411.208604, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk From 86360ee6bff555b7876e38b92c1984ba7787b118 Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 12:13:16 +0000 Subject: [PATCH 08/15] Adding number of operations to quantization models --- torchvision/models/quantization/googlenet.py | 1 + torchvision/models/quantization/inception.py | 1 + torchvision/models/quantization/mobilenetv2.py | 1 + torchvision/models/quantization/mobilenetv3.py | 1 + torchvision/models/quantization/resnet.py | 6 ++++++ torchvision/models/quantization/shufflenetv2.py | 4 ++++ 6 files changed, 14 insertions(+) diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index 8c3e72fdc25..53d181973b4 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -123,6 +123,7 @@ class GoogLeNet_QuantizedWeights(WeightsEnum): "acc@5": 89.404, } }, + "_ops": 1.498376, "_weight_size": 12.617729, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py index a8baac65f55..3450ed8307f 100644 --- a/torchvision/models/quantization/inception.py +++ b/torchvision/models/quantization/inception.py @@ -183,6 +183,7 @@ class Inception_V3_QuantizedWeights(WeightsEnum): "acc@5": 93.354, } }, + "_ops": 5.713216, "_weight_size": 23.145652, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py index 0b4211f08e2..f64d92ead0d 100644 --- a/torchvision/models/quantization/mobilenetv2.py +++ b/torchvision/models/quantization/mobilenetv2.py @@ -80,6 +80,7 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum): "acc@5": 90.150, } }, + "_ops": 0.300774, "_weight_size": 3.422719, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py index 220d1b8e25d..0012b53e172 100644 --- a/torchvision/models/quantization/mobilenetv3.py +++ b/torchvision/models/quantization/mobilenetv3.py @@ -175,6 +175,7 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum): "acc@5": 90.858, } }, + "_ops": 0.216590, "_weight_size": 21.553815, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/resnet.py b/torchvision/models/quantization/resnet.py index 8074e8e327c..55f03ac66ab 100644 --- a/torchvision/models/quantization/resnet.py +++ b/torchvision/models/quantization/resnet.py @@ -175,6 +175,7 @@ class ResNet18_QuantizedWeights(WeightsEnum): "acc@5": 88.882, } }, + "_ops": 1.814073, "_weight_size": 
11.238080, }, ) @@ -195,6 +196,7 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 92.814, } }, + "_ops": 4.089184, "_weight_size": 24.758719, }, ) @@ -211,6 +213,7 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 94.976, } }, + "_ops": 4.089184, "_weight_size": 24.953237, }, ) @@ -231,6 +234,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 94.480, } }, + "_ops": 16.414015, "_weight_size": 86.034273, }, ) @@ -247,6 +251,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 96.132, } }, + "_ops": 16.414015, "_weight_size": 86.645276, }, ) @@ -268,6 +273,7 @@ class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum): "acc@5": 96.326, } }, + "_ops": 15.460270, "_weight_size": 81.556409, }, ) diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py index 54c58f58a70..c98ea4ed16f 100644 --- a/torchvision/models/quantization/shufflenetv2.py +++ b/torchvision/models/quantization/shufflenetv2.py @@ -139,6 +139,7 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum): "acc@5": 79.780, } }, + "_ops": 0.040476, "_weight_size": 1.500871, }, ) @@ -159,6 +160,7 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum): "acc@5": 87.582, } }, + "_ops": 0.144908, "_weight_size": 2.333858, }, ) @@ -180,6 +182,7 @@ class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum): "acc@5": 90.700, } }, + "_ops": 0.295759, "_weight_size": 3.671769, }, ) @@ -201,6 +204,7 @@ class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum): "acc@5": 92.488, } }, + "_ops": 0.583253, "_weight_size": 7.467118, }, ) From 801a97922d9c84dc0d4adb3ec0c78f87d47a3013 Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 12:23:15 +0000 Subject: [PATCH 09/15] Reflecting _flops change to _ops --- docs/source/conf.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 4dcb0cb61cd..6a260c3bb29 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -385,12 +385,10 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern if exclude_patterns is not None: weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)] - rich_metadata = ["Size (MB)"] - if "_flops" in weights[0].meta: # assumes same rich meta for all models in module - rich_metadata = ["GFLOPs"] + rich_metadata + ops_name = "GOPs" if "QuantizedWeights" in weights_endswith else "GFLOPs" metrics_keys, metrics_names = zip(*metrics) - column_names = ["Weight"] + list(metrics_names) + ["Params"] + rich_metadata + ["Recipe"] # Final column order + column_names = ["Weight"] + list(metrics_names) + ["Params"] + [ops_name, "Size (MB)", "Recipe"] # Final column order column_names = [f"**{name}**" for name in column_names] # Add bold content = [] @@ -399,17 +397,14 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern f":class:`{w} <{type(w).__name__}>`", *(w.meta["_metrics"][dataset][metric] for metric in metrics_keys), f"{w.meta['num_params']/1e6:.1f}M", + f"{w.meta['_ops']:.3f}", + f"{round(w.meta['_weight_size'], 1):.1f}", + f"`link <{w.meta['recipe']}>`__", ] - if "_flops" in w.meta: - row.append(f"{w.meta['_flops']:.3f}") - - row.append(f"{round(w.meta['_weight_size'], 1):.1f}") - row.append(f"`link <{w.meta['recipe']}>`__") - content.append(row) - column_widths = ["110"] + ["18"] * len(metrics_names) + ["18"] + ["18"] * len(rich_metadata) + ["10"] + column_widths = ["110"] + ["18"] * len(metrics_names) + ["18"] * 3 + 
["10"] widths_table = " ".join(column_widths) table = tabulate(content, headers=column_names, tablefmt="rst") From e0e3ceef84e6ce8adfec6da6a6410b3a9c50ae1b Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 12:41:02 +0000 Subject: [PATCH 10/15] Renamed ops and weight size in individual model doc pages --- docs/source/conf.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/source/conf.py b/docs/source/conf.py index 6a260c3bb29..9ce260cc82a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -362,6 +362,14 @@ def inject_weight_metadata(app, what, name, obj, options, lines): max_visible = 3 v_sample = ", ".join(v[:max_visible]) v = f"{v_sample}, ... ({len(v)-max_visible} omitted)" if len(v) > max_visible else v_sample + elif k == "_ops": + if obj.__name__.endswith("_QuantizedWeights"): + k = "integer operations (GOPs)" + else: + k = "floating point operations (GFLOPs)" + elif k == "_weight_size": + k = "weights file size (MB)" + table.append((str(k), str(v))) table = tabulate(table, tablefmt="rst") lines += [".. rst-class:: table-weights"] # Custom CSS class, see custom_torchvision.css From de51c2c4f3484c185c78fde69fe65c0c858c193a Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 12:42:27 +0000 Subject: [PATCH 11/15] Linter fixes --- docs/source/conf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 9ce260cc82a..30c42b75652 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -396,7 +396,9 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern ops_name = "GOPs" if "QuantizedWeights" in weights_endswith else "GFLOPs" metrics_keys, metrics_names = zip(*metrics) - column_names = ["Weight"] + list(metrics_names) + ["Params"] + [ops_name, "Size (MB)", "Recipe"] # Final column order + column_names = ( + ["Weight"] + list(metrics_names) + ["Params"] + [ops_name, "Size (MB)", "Recipe"] + ) # Final column order column_names = [f"**{name}**" for name in column_names] # Add bold content = [] From 07a929e8b851242b1c75c9016ee06a8e502961d8 Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 15:04:13 +0000 Subject: [PATCH 12/15] Rounding ops to first decimal --- torchvision/models/alexnet.py | 2 +- torchvision/models/convnext.py | 8 +-- torchvision/models/densenet.py | 8 +-- torchvision/models/detection/faster_rcnn.py | 8 +-- torchvision/models/detection/fcos.py | 2 +- torchvision/models/detection/keypoint_rcnn.py | 4 +- torchvision/models/detection/mask_rcnn.py | 4 +- torchvision/models/detection/retinanet.py | 4 +- torchvision/models/detection/ssd.py | 2 +- torchvision/models/detection/ssdlite.py | 2 +- torchvision/models/efficientnet.py | 24 +++---- torchvision/models/googlenet.py | 2 +- torchvision/models/inception.py | 2 +- torchvision/models/maxvit.py | 2 +- torchvision/models/mnasnet.py | 8 +-- torchvision/models/mobilenetv2.py | 4 +- torchvision/models/mobilenetv3.py | 6 +- torchvision/models/optical_flow/raft.py | 16 ++--- torchvision/models/quantization/googlenet.py | 2 +- torchvision/models/quantization/inception.py | 2 +- .../models/quantization/mobilenetv2.py | 2 +- .../models/quantization/mobilenetv3.py | 2 +- torchvision/models/quantization/resnet.py | 12 ++-- .../models/quantization/shufflenetv2.py | 8 +-- torchvision/models/regnet.py | 68 +++++++++---------- torchvision/models/resnet.py | 34 +++++----- torchvision/models/segmentation/deeplabv3.py | 6 +- torchvision/models/segmentation/fcn.py | 4 +- 
torchvision/models/segmentation/lraspp.py | 2 +- torchvision/models/shufflenetv2.py | 8 +-- torchvision/models/squeezenet.py | 4 +- torchvision/models/swin_transformer.py | 12 ++-- torchvision/models/vgg.py | 18 ++--- torchvision/models/video/mvit.py | 4 +- torchvision/models/video/resnet.py | 6 +- torchvision/models/video/s3d.py | 2 +- torchvision/models/vision_transformer.py | 20 +++--- 37 files changed, 162 insertions(+), 162 deletions(-) diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py index 9d36fb2b7ee..5ca3c2434b4 100644 --- a/torchvision/models/alexnet.py +++ b/torchvision/models/alexnet.py @@ -67,7 +67,7 @@ class AlexNet_Weights(WeightsEnum): "acc@5": 79.066, } }, - "_ops": 0.714188, + "_ops": 0.7, "_weight_size": 233.086501, "_docs": """ These weights reproduce closely the results of the paper using a simplified training recipe. diff --git a/torchvision/models/convnext.py b/torchvision/models/convnext.py index eae447d4f81..d98a64de413 100644 --- a/torchvision/models/convnext.py +++ b/torchvision/models/convnext.py @@ -219,7 +219,7 @@ class ConvNeXt_Tiny_Weights(WeightsEnum): "acc@5": 96.146, } }, - "_ops": 4.455531, + "_ops": 4.5, "_weight_size": 109.118672, }, ) @@ -239,7 +239,7 @@ class ConvNeXt_Small_Weights(WeightsEnum): "acc@5": 96.650, } }, - "_ops": 8.683712, + "_ops": 8.7, "_weight_size": 191.702775, }, ) @@ -259,7 +259,7 @@ class ConvNeXt_Base_Weights(WeightsEnum): "acc@5": 96.870, } }, - "_ops": 15.354729, + "_ops": 15.4, "_weight_size": 338.064286, }, ) @@ -279,7 +279,7 @@ class ConvNeXt_Large_Weights(WeightsEnum): "acc@5": 96.976, } }, - "_ops": 34.361434, + "_ops": 34.4, "_weight_size": 754.537187, }, ) diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py index 2288bc4ffa1..b9b51253042 100644 --- a/torchvision/models/densenet.py +++ b/torchvision/models/densenet.py @@ -277,7 +277,7 @@ class DenseNet121_Weights(WeightsEnum): "acc@5": 91.972, } }, - "_ops": 2.834162, + "_ops": 2.8, "_weight_size": 30.844645, }, ) @@ -297,7 +297,7 @@ class DenseNet161_Weights(WeightsEnum): "acc@5": 93.560, } }, - "_ops": 7.727907, + "_ops": 7.7, "_weight_size": 110.369482, }, ) @@ -317,7 +317,7 @@ class DenseNet169_Weights(WeightsEnum): "acc@5": 92.806, } }, - "_ops": 3.359843, + "_ops": 3.4, "_weight_size": 54.708029, }, ) @@ -337,7 +337,7 @@ class DenseNet201_Weights(WeightsEnum): "acc@5": 93.370, } }, - "_ops": 4.291366, + "_ops": 4.3, "_weight_size": 77.373247, }, ) diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index 8faff726850..d4434584989 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -388,7 +388,7 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum): "box_map": 37.0, } }, - "_ops": 134.379721, + "_ops": 134.4, "_weight_size": 159.743153, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -409,7 +409,7 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): "box_map": 46.7, } }, - "_ops": 280.370729, + "_ops": 280.4, "_weight_size": 167.104394, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, @@ -430,7 +430,7 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum): "box_map": 32.8, } }, - "_ops": 4.493592, + "_ops": 4.5, "_weight_size": 74.238593, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -451,7 +451,7 @@ class 
FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum): "box_map": 22.8, } }, - "_ops": 0.718998, + "_ops": 0.7, "_weight_size": 74.238593, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 3b862c2aada..319fe66465e 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -662,7 +662,7 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum): "box_map": 39.2, } }, - "_ops": 128.207053, + "_ops": 128.2, "_weight_size": 123.607730, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py index 5a635265a3e..82ce8430745 100644 --- a/torchvision/models/detection/keypoint_rcnn.py +++ b/torchvision/models/detection/keypoint_rcnn.py @@ -328,7 +328,7 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "kp_map": 61.1, } }, - "_ops": 133.924041, + "_ops": 133.9, "_weight_size": 226.053994, "_docs": """ These weights were produced by following a similar training recipe as on the paper but use a checkpoint @@ -349,7 +349,7 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "kp_map": 65.0, } }, - "_ops": 137.419502, + "_ops": 137.4, "_weight_size": 226.053994, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/detection/mask_rcnn.py b/torchvision/models/detection/mask_rcnn.py index f8f3c878ed1..af1355ed432 100644 --- a/torchvision/models/detection/mask_rcnn.py +++ b/torchvision/models/detection/mask_rcnn.py @@ -370,7 +370,7 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum): "mask_map": 34.6, } }, - "_ops": 134.379721, + "_ops": 134.4, "_weight_size": 169.839934, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -392,7 +392,7 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): "mask_map": 41.8, } }, - "_ops": 333.577360, + "_ops": 333.6, "_weight_size": 177.219453, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index b710e9b9c1f..b87c55d65fe 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -690,7 +690,7 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum): "box_map": 36.4, } }, - "_ops": 151.540437, + "_ops": 151.5, "_weight_size": 130.267216, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, @@ -711,7 +711,7 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum): "box_map": 41.5, } }, - "_ops": 152.238199, + "_ops": 152.2, "_weight_size": 146.037091, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 8cd82cdfc71..9d570bc2f5d 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -39,7 +39,7 @@ class SSD300_VGG16_Weights(WeightsEnum): "box_map": 25.1, } }, - "_ops": 34.858153, + "_ops": 34.9, "_weight_size": 135.988447, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git 
a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py index 99c8fc97f03..e7876ebf1cb 100644 --- a/torchvision/models/detection/ssdlite.py +++ b/torchvision/models/detection/ssdlite.py @@ -198,7 +198,7 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum): "box_map": 21.3, } }, - "_ops": 0.583172, + "_ops": 0.6, "_weight_size": 13.417583, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py index 14f247ac32a..88bbd5843a2 100644 --- a/torchvision/models/efficientnet.py +++ b/torchvision/models/efficientnet.py @@ -464,7 +464,7 @@ class EfficientNet_B0_Weights(WeightsEnum): "acc@5": 93.532, } }, - "_ops": 0.385815, + "_ops": 0.4, "_weight_size": 20.450974, "_docs": """These weights are ported from the original paper.""", }, @@ -488,7 +488,7 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@5": 94.186, } }, - "_ops": 0.686803, + "_ops": 0.7, "_weight_size": 30.133798, "_docs": """These weights are ported from the original paper.""", }, @@ -508,7 +508,7 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@5": 94.934, } }, - "_ops": 0.686803, + "_ops": 0.7, "_weight_size": 30.136422, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -536,7 +536,7 @@ class EfficientNet_B2_Weights(WeightsEnum): "acc@5": 95.310, } }, - "_ops": 1.087529, + "_ops": 1.1, "_weight_size": 35.173593, "_docs": """These weights are ported from the original paper.""", }, @@ -560,7 +560,7 @@ class EfficientNet_B3_Weights(WeightsEnum): "acc@5": 96.054, } }, - "_ops": 1.827141, + "_ops": 1.8, "_weight_size": 47.183904, "_docs": """These weights are ported from the original paper.""", }, @@ -584,7 +584,7 @@ class EfficientNet_B4_Weights(WeightsEnum): "acc@5": 96.594, } }, - "_ops": 4.393771, + "_ops": 4.4, "_weight_size": 74.489011, "_docs": """These weights are ported from the original paper.""", }, @@ -608,7 +608,7 @@ class EfficientNet_B5_Weights(WeightsEnum): "acc@5": 96.628, } }, - "_ops": 10.266385, + "_ops": 10.3, "_weight_size": 116.863912, "_docs": """These weights are ported from the original paper.""", }, @@ -632,7 +632,7 @@ class EfficientNet_B6_Weights(WeightsEnum): "acc@5": 96.916, } }, - "_ops": 19.067594, + "_ops": 19.1, "_weight_size": 165.361524, "_docs": """These weights are ported from the original paper.""", }, @@ -656,7 +656,7 @@ class EfficientNet_B7_Weights(WeightsEnum): "acc@5": 96.908, } }, - "_ops": 37.745884, + "_ops": 37.7, "_weight_size": 254.675393, "_docs": """These weights are ported from the original paper.""", }, @@ -682,7 +682,7 @@ class EfficientNet_V2_S_Weights(WeightsEnum): "acc@5": 96.878, } }, - "_ops": 8.365617, + "_ops": 8.4, "_weight_size": 82.703832, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -712,7 +712,7 @@ class EfficientNet_V2_M_Weights(WeightsEnum): "acc@5": 97.156, } }, - "_ops": 24.582460, + "_ops": 24.6, "_weight_size": 208.010186, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -745,7 +745,7 @@ class EfficientNet_V2_L_Weights(WeightsEnum): "acc@5": 97.788, } }, - "_ops": 56.079699, + "_ops": 56.1, "_weight_size": 454.573071, "_docs": """These weights are ported from the original paper.""", }, diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index 
123ee930ce5..f467c0a4b3e 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -290,7 +290,7 @@ class GoogLeNet_Weights(WeightsEnum): "acc@5": 89.530, } }, - "_ops": 1.498376, + "_ops": 1.5, "_weight_size": 49.731288, "_docs": """These weights are ported from the original paper.""", }, diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py index 433a9e2dd25..0e11fb803f8 100644 --- a/torchvision/models/inception.py +++ b/torchvision/models/inception.py @@ -422,7 +422,7 @@ class Inception_V3_Weights(WeightsEnum): "acc@5": 93.450, } }, - "_ops": 5.713216, + "_ops": 5.7, "_weight_size": 103.902575, "_docs": """These weights are ported from the original paper.""", }, diff --git a/torchvision/models/maxvit.py b/torchvision/models/maxvit.py index c3be63446ee..bf1f9a01c6f 100644 --- a/torchvision/models/maxvit.py +++ b/torchvision/models/maxvit.py @@ -785,7 +785,7 @@ class MaxVit_T_Weights(WeightsEnum): "acc@5": 96.722, } }, - "_ops": 5.558047, + "_ops": 5.6, "_weight_size": 118.769322, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py index 31812d941c0..83b14070d73 100644 --- a/torchvision/models/mnasnet.py +++ b/torchvision/models/mnasnet.py @@ -231,7 +231,7 @@ class MNASNet0_5_Weights(WeightsEnum): "acc@5": 87.490, } }, - "_ops": 0.104456, + "_ops": 0.1, "_weight_size": 8.591165, "_docs": """These weights reproduce closely the results of the paper.""", }, @@ -253,7 +253,7 @@ class MNASNet0_75_Weights(WeightsEnum): "acc@5": 90.496, } }, - "_ops": 0.215493, + "_ops": 0.2, "_weight_size": 12.302564, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe @@ -277,7 +277,7 @@ class MNASNet1_0_Weights(WeightsEnum): "acc@5": 91.510, } }, - "_ops": 0.314416, + "_ops": 0.3, "_weight_size": 16.915318, "_docs": """These weights reproduce closely the results of the paper.""", }, @@ -299,7 +299,7 @@ class MNASNet1_3_Weights(WeightsEnum): "acc@5": 93.522, } }, - "_ops": 0.526362, + "_ops": 0.5, "_weight_size": 24.245618, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index bcb8df8b93b..7cde8db6513 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -194,7 +194,7 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@5": 90.286, } }, - "_ops": 0.300774, + "_ops": 0.3, "_weight_size": 13.554546, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -211,7 +211,7 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@5": 90.822, } }, - "_ops": 0.300774, + "_ops": 0.3, "_weight_size": 13.598035, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py index 14f87997ab5..07ae7c2e6f6 100644 --- a/torchvision/models/mobilenetv3.py +++ b/torchvision/models/mobilenetv3.py @@ -307,7 +307,7 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@5": 91.340, } }, - "_ops": 0.216590, + "_ops": 0.2, "_weight_size": 21.113799, "_docs": """These weights were trained from scratch by using a simple training recipe.""", }, @@ -325,7 +325,7 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@5": 92.566, } }, - "_ops": 0.216590, + "_ops": 0.2, 
"_weight_size": 21.106828, "_docs": """ These weights improve marginally upon the results of the original paper by using a modified version of @@ -351,7 +351,7 @@ class MobileNet_V3_Small_Weights(WeightsEnum): "acc@5": 87.402, } }, - "_ops": 0.056510, + "_ops": 0.1, "_weight_size": 9.829093, "_docs": """ These weights improve upon the results of the original paper by using a simple training recipe. diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py index e83b727a8fd..a776735b6af 100644 --- a/torchvision/models/optical_flow/raft.py +++ b/torchvision/models/optical_flow/raft.py @@ -552,7 +552,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 2.7894}, "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506}, }, - "_ops": 211.007046, + "_ops": 211.0, "_weight_size": 20.128829, "_docs": """These weights were ported from the original paper. They are trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -572,7 +572,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 2.7161}, "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679}, }, - "_ops": 211.007046, + "_ops": 211.0, "_weight_size": 20.128829, "_docs": """These weights were trained from scratch on :class:`~torchvision.datasets.FlyingChairs` + @@ -592,7 +592,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Test-Cleanpass": {"epe": 1.94}, "Sintel-Test-Finalpass": {"epe": 3.18}, }, - "_ops": 211.007046, + "_ops": 211.0, "_weight_size": 20.128829, "_docs": """ These weights were ported from the original paper. They are @@ -618,7 +618,7 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Test-Cleanpass": {"epe": 1.819}, "Sintel-Test-Finalpass": {"epe": 3.067}, }, - "_ops": 211.007046, + "_ops": 211.0, "_weight_size": 20.128829, "_docs": """ These weights were trained from scratch. They are @@ -644,7 +644,7 @@ class Raft_Large_Weights(WeightsEnum): "_metrics": { "Kitti-Test": {"fl_all": 5.10}, }, - "_ops": 211.007046, + "_ops": 211.0, "_weight_size": 20.128829, "_docs": """ These weights were ported from the original paper. They are @@ -667,7 +667,7 @@ class Raft_Large_Weights(WeightsEnum): "_metrics": { "Kitti-Test": {"fl_all": 5.19}, }, - "_ops": 211.007046, + "_ops": 211.0, "_weight_size": 20.128829, "_docs": """ These weights were trained from scratch. They are @@ -710,7 +710,7 @@ class Raft_Small_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 3.2790}, "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801}, }, - "_ops": 47.655158, + "_ops": 47.7, "_weight_size": 3.820600, "_docs": """These weights were ported from the original paper. 
They are trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -729,7 +729,7 @@ class Raft_Small_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 3.2831}, "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369}, }, - "_ops": 47.655158, + "_ops": 47.7, "_weight_size": 3.820600, "_docs": """These weights were trained from scratch on :class:`~torchvision.datasets.FlyingChairs` + diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index 53d181973b4..46c0ee400f3 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -123,7 +123,7 @@ class GoogLeNet_QuantizedWeights(WeightsEnum): "acc@5": 89.404, } }, - "_ops": 1.498376, + "_ops": 1.5, "_weight_size": 12.617729, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py index 3450ed8307f..e789c8ed3ea 100644 --- a/torchvision/models/quantization/inception.py +++ b/torchvision/models/quantization/inception.py @@ -183,7 +183,7 @@ class Inception_V3_QuantizedWeights(WeightsEnum): "acc@5": 93.354, } }, - "_ops": 5.713216, + "_ops": 5.7, "_weight_size": 23.145652, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py index f64d92ead0d..fc21fc6a51f 100644 --- a/torchvision/models/quantization/mobilenetv2.py +++ b/torchvision/models/quantization/mobilenetv2.py @@ -80,7 +80,7 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum): "acc@5": 90.150, } }, - "_ops": 0.300774, + "_ops": 0.3, "_weight_size": 3.422719, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py index 0012b53e172..546a3981621 100644 --- a/torchvision/models/quantization/mobilenetv3.py +++ b/torchvision/models/quantization/mobilenetv3.py @@ -175,7 +175,7 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum): "acc@5": 90.858, } }, - "_ops": 0.216590, + "_ops": 0.2, "_weight_size": 21.553815, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized diff --git a/torchvision/models/quantization/resnet.py b/torchvision/models/quantization/resnet.py index 55f03ac66ab..b858a402ede 100644 --- a/torchvision/models/quantization/resnet.py +++ b/torchvision/models/quantization/resnet.py @@ -175,7 +175,7 @@ class ResNet18_QuantizedWeights(WeightsEnum): "acc@5": 88.882, } }, - "_ops": 1.814073, + "_ops": 1.8, "_weight_size": 11.238080, }, ) @@ -196,7 +196,7 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 92.814, } }, - "_ops": 4.089184, + "_ops": 4.1, "_weight_size": 24.758719, }, ) @@ -213,7 +213,7 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 94.976, } }, - "_ops": 4.089184, + "_ops": 4.1, "_weight_size": 24.953237, }, ) @@ -234,7 +234,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 94.480, } }, - "_ops": 16.414015, + "_ops": 16.4, "_weight_size": 86.034273, }, ) @@ -251,7 +251,7 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 96.132, } }, - "_ops": 16.414015, + "_ops": 16.4, "_weight_size": 86.645276, }, ) @@ -273,7 +273,7 @@ class 
ResNeXt101_64X4D_QuantizedWeights(WeightsEnum): "acc@5": 96.326, } }, - "_ops": 15.460270, + "_ops": 15.5, "_weight_size": 81.556409, }, ) diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py index c98ea4ed16f..574c0918546 100644 --- a/torchvision/models/quantization/shufflenetv2.py +++ b/torchvision/models/quantization/shufflenetv2.py @@ -139,7 +139,7 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum): "acc@5": 79.780, } }, - "_ops": 0.040476, + "_ops": 0.0, "_weight_size": 1.500871, }, ) @@ -160,7 +160,7 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum): "acc@5": 87.582, } }, - "_ops": 0.144908, + "_ops": 0.1, "_weight_size": 2.333858, }, ) @@ -182,7 +182,7 @@ class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum): "acc@5": 90.700, } }, - "_ops": 0.295759, + "_ops": 0.3, "_weight_size": 3.671769, }, ) @@ -204,7 +204,7 @@ class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum): "acc@5": 92.488, } }, - "_ops": 0.583253, + "_ops": 0.6, "_weight_size": 7.467118, }, ) diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py index 3db784f8042..aa89f4715c2 100644 --- a/torchvision/models/regnet.py +++ b/torchvision/models/regnet.py @@ -428,7 +428,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@5": 91.716, } }, - "_ops": 0.401843, + "_ops": 0.4, "_weight_size": 16.805909, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -446,7 +446,7 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@5": 92.742, } }, - "_ops": 0.401843, + "_ops": 0.4, "_weight_size": 16.805909, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -472,7 +472,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@5": 93.136, } }, - "_ops": 0.833856, + "_ops": 0.8, "_weight_size": 24.773644, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -490,7 +490,7 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@5": 94.502, } }, - "_ops": 0.833856, + "_ops": 0.8, "_weight_size": 24.773522, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -516,7 +516,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@5": 93.966, } }, - "_ops": 1.612076, + "_ops": 1.6, "_weight_size": 43.152310, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -534,7 +534,7 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@5": 95.444, } }, - "_ops": 1.612076, + "_ops": 1.6, "_weight_size": 43.152310, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -560,7 +560,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@5": 94.576, } }, - "_ops": 3.176483, + "_ops": 3.2, "_weight_size": 74.566991, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -578,7 +578,7 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@5": 95.972, } }, - "_ops": 3.176483, + "_ops": 3.2, "_weight_size": 74.566991, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -604,7 +604,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@5": 95.048, } }, - "_ops": 8.473071, + "_ops": 8.5, "_weight_size": 150.701436, "_docs": """These weights reproduce closely the results of the paper using a simple 
training recipe.""", }, @@ -622,7 +622,7 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@5": 96.330, } }, - "_ops": 8.473071, + "_ops": 8.5, "_weight_size": 150.701436, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -648,7 +648,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 95.240, } }, - "_ops": 15.911510, + "_ops": 15.9, "_weight_size": 319.490335, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -666,7 +666,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 96.328, } }, - "_ops": 15.911510, + "_ops": 15.9, "_weight_size": 319.490335, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -689,7 +689,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 98.054, } }, - "_ops": 46.734897, + "_ops": 46.7, "_weight_size": 319.490335, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -712,7 +712,7 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 97.244, } }, - "_ops": 15.911510, + "_ops": 15.9, "_weight_size": 319.490335, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -737,7 +737,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 95.340, } }, - "_ops": 32.279553, + "_ops": 32.3, "_weight_size": 554.076371, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -755,7 +755,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 96.498, } }, - "_ops": 32.279553, + "_ops": 32.3, "_weight_size": 554.076371, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -778,7 +778,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 98.362, } }, - "_ops": 94.826458, + "_ops": 94.8, "_weight_size": 554.076371, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -801,7 +801,7 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 97.480, } }, - "_ops": 32.279553, + "_ops": 32.3, "_weight_size": 554.076371, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -827,7 +827,7 @@ class RegNet_Y_128GF_Weights(WeightsEnum): "acc@5": 98.682, } }, - "_ops": 374.570006, + "_ops": 374.6, "_weight_size": 2461.563993, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -850,7 +850,7 @@ class RegNet_Y_128GF_Weights(WeightsEnum): "acc@5": 97.844, } }, - "_ops": 127.517816, + "_ops": 127.5, "_weight_size": 2461.563993, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -875,7 +875,7 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@5": 90.950, } }, - "_ops": 0.413813, + "_ops": 0.4, "_weight_size": 21.258035, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -893,7 +893,7 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@5": 92.322, } }, - "_ops": 0.413813, + "_ops": 0.4, "_weight_size": 21.256570, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -919,7 +919,7 @@ class RegNet_X_800MF_Weights(WeightsEnum): "acc@5": 92.348, } }, - "_ops": 0.799700, + "_ops": 0.8, "_weight_size": 27.945130, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ 
-937,7 +937,7 @@ class RegNet_X_800MF_Weights(WeightsEnum): "acc@5": 93.826, } }, - "_ops": 0.799700, + "_ops": 0.8, "_weight_size": 27.945130, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -963,7 +963,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@5": 93.440, } }, - "_ops": 1.602850, + "_ops": 1.6, "_weight_size": 35.339471, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -981,7 +981,7 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@5": 94.922, } }, - "_ops": 1.602850, + "_ops": 1.6, "_weight_size": 35.339471, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1007,7 +1007,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@5": 93.992, } }, - "_ops": 3.176622, + "_ops": 3.2, "_weight_size": 58.755979, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1025,7 +1025,7 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@5": 95.430, } }, - "_ops": 3.176622, + "_ops": 3.2, "_weight_size": 58.755979, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1051,7 +1051,7 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@5": 94.686, } }, - "_ops": 7.995132, + "_ops": 8.0, "_weight_size": 151.455937, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1069,7 +1069,7 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@5": 95.678, } }, - "_ops": 7.995132, + "_ops": 8.0, "_weight_size": 151.455937, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1095,7 +1095,7 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@5": 94.944, } }, - "_ops": 15.940755, + "_ops": 15.9, "_weight_size": 207.627419, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1113,7 +1113,7 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@5": 96.196, } }, - "_ops": 15.940755, + "_ops": 15.9, "_weight_size": 207.627419, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's @@ -1139,7 +1139,7 @@ class RegNet_X_32GF_Weights(WeightsEnum): "acc@5": 95.248, } }, - "_ops": 31.735930, + "_ops": 31.7, "_weight_size": 412.039433, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -1157,7 +1157,7 @@ class RegNet_X_32GF_Weights(WeightsEnum): "acc@5": 96.288, } }, - "_ops": 31.735930, + "_ops": 31.7, "_weight_size": 412.039433, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 1bc97ca5355..56c41418eec 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -323,7 +323,7 @@ class ResNet18_Weights(WeightsEnum): "acc@5": 89.078, } }, - "_ops": 1.814073, + "_ops": 1.8, "_weight_size": 44.661113, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -345,7 +345,7 @@ class ResNet34_Weights(WeightsEnum): "acc@5": 91.420, } }, - "_ops": 3.663761, + "_ops": 3.7, "_weight_size": 83.274669, "_docs": """These weights reproduce closely the results of the paper using a simple training 
recipe.""", }, @@ -367,7 +367,7 @@ class ResNet50_Weights(WeightsEnum): "acc@5": 92.862, } }, - "_ops": 4.089184, + "_ops": 4.1, "_weight_size": 97.780545, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -385,7 +385,7 @@ class ResNet50_Weights(WeightsEnum): "acc@5": 95.434, } }, - "_ops": 4.089184, + "_ops": 4.1, "_weight_size": 97.790162, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -410,7 +410,7 @@ class ResNet101_Weights(WeightsEnum): "acc@5": 93.546, } }, - "_ops": 7.801405, + "_ops": 7.8, "_weight_size": 170.511188, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -428,7 +428,7 @@ class ResNet101_Weights(WeightsEnum): "acc@5": 95.780, } }, - "_ops": 7.801405, + "_ops": 7.8, "_weight_size": 170.530362, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -453,7 +453,7 @@ class ResNet152_Weights(WeightsEnum): "acc@5": 94.046, } }, - "_ops": 11.513627, + "_ops": 11.5, "_weight_size": 230.434152, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -471,7 +471,7 @@ class ResNet152_Weights(WeightsEnum): "acc@5": 96.002, } }, - "_ops": 11.513627, + "_ops": 11.5, "_weight_size": 230.473687, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -496,7 +496,7 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@5": 93.698, } }, - "_ops": 4.230480, + "_ops": 4.2, "_weight_size": 95.788646, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -514,7 +514,7 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@5": 95.340, } }, - "_ops": 4.230480, + "_ops": 4.2, "_weight_size": 95.833192, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -539,7 +539,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@5": 94.526, } }, - "_ops": 16.414015, + "_ops": 16.4, "_weight_size": 339.586349, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -557,7 +557,7 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@5": 96.228, } }, - "_ops": 16.414015, + "_ops": 16.4, "_weight_size": 339.673062, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -582,7 +582,7 @@ class ResNeXt101_64X4D_Weights(WeightsEnum): "acc@5": 96.454, } }, - "_ops": 15.460270, + "_ops": 15.5, "_weight_size": 319.317594, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe @@ -607,7 +607,7 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@5": 94.086, } }, - "_ops": 11.398021, + "_ops": 11.4, "_weight_size": 131.820194, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -625,7 +625,7 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@5": 95.758, } }, - "_ops": 11.398021, + "_ops": 11.4, "_weight_size": 263.124207, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe @@ -650,7 +650,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@5": 94.284, } }, - "_ops": 22.753051, + "_ops": 22.8, "_weight_size": 242.896219, "_docs": """These 
weights reproduce closely the results of the paper using a simple training recipe.""", }, @@ -668,7 +668,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@5": 96.020, } }, - "_ops": 22.753051, + "_ops": 22.8, "_weight_size": 484.747281, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py index ef4d807f461..f1a2cc4200d 100644 --- a/torchvision/models/segmentation/deeplabv3.py +++ b/torchvision/models/segmentation/deeplabv3.py @@ -152,7 +152,7 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum): "pixel_acc": 92.4, } }, - "_ops": 178.721945, + "_ops": 178.7, "_weight_size": 160.514977, }, ) @@ -173,7 +173,7 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum): "pixel_acc": 92.4, } }, - "_ops": 258.743039, + "_ops": 258.7, "_weight_size": 233.216800, }, ) @@ -194,7 +194,7 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum): "pixel_acc": 91.2, } }, - "_ops": 10.452448, + "_ops": 10.5, "_weight_size": 42.301330, }, ) diff --git a/torchvision/models/segmentation/fcn.py b/torchvision/models/segmentation/fcn.py index 6486b3aee7e..1a4dffd24b7 100644 --- a/torchvision/models/segmentation/fcn.py +++ b/torchvision/models/segmentation/fcn.py @@ -71,7 +71,7 @@ class FCN_ResNet50_Weights(WeightsEnum): "pixel_acc": 91.4, } }, - "_ops": 152.716512, + "_ops": 152.7, "_weight_size": 135.009211, }, ) @@ -92,7 +92,7 @@ class FCN_ResNet101_Weights(WeightsEnum): "pixel_acc": 91.9, } }, - "_ops": 232.737606, + "_ops": 232.7, "_weight_size": 207.711034, }, ) diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py index d00cfa96e0e..551053bc63d 100644 --- a/torchvision/models/segmentation/lraspp.py +++ b/torchvision/models/segmentation/lraspp.py @@ -108,7 +108,7 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum): "pixel_acc": 91.2, } }, - "_ops": 2.086220, + "_ops": 2.1, "_weight_size": 12.490331, "_docs": """ These weights were trained on a subset of COCO, using only the 20 categories that are present in the diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py index 2c62fc1d6d8..b61cc2560bd 100644 --- a/torchvision/models/shufflenetv2.py +++ b/torchvision/models/shufflenetv2.py @@ -204,7 +204,7 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum): "acc@5": 81.746, } }, - "_ops": 0.040476, + "_ops": 0.0, "_weight_size": 5.281570, "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""", }, @@ -226,7 +226,7 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum): "acc@5": 88.316, } }, - "_ops": 0.144908, + "_ops": 0.1, "_weight_size": 8.791250, "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""", }, @@ -248,7 +248,7 @@ class ShuffleNet_V2_X1_5_Weights(WeightsEnum): "acc@5": 91.086, } }, - "_ops": 0.295759, + "_ops": 0.3, "_weight_size": 13.557034, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe @@ -273,7 +273,7 @@ class ShuffleNet_V2_X2_0_Weights(WeightsEnum): "acc@5": 93.006, } }, - "_ops": 0.583253, + "_ops": 0.6, "_weight_size": 28.432767, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index 8b7609da655..d2a467c1f18 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ 
-135,7 +135,7 @@ class SqueezeNet1_0_Weights(WeightsEnum): "acc@5": 80.420, } }, - "_ops": 0.818925, + "_ops": 0.8, "_weight_size": 4.778434, }, ) @@ -156,7 +156,7 @@ class SqueezeNet1_1_Weights(WeightsEnum): "acc@5": 80.624, } }, - "_ops": 0.349152, + "_ops": 0.3, "_weight_size": 4.729117, }, ) diff --git a/torchvision/models/swin_transformer.py b/torchvision/models/swin_transformer.py index c18cff6e19a..50762c0145d 100644 --- a/torchvision/models/swin_transformer.py +++ b/torchvision/models/swin_transformer.py @@ -660,7 +660,7 @@ class Swin_T_Weights(WeightsEnum): "acc@5": 95.776, } }, - "_ops": 4.490567, + "_ops": 4.5, "_weight_size": 108.190383, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -685,7 +685,7 @@ class Swin_S_Weights(WeightsEnum): "acc@5": 96.360, } }, - "_ops": 8.740875, + "_ops": 8.7, "_weight_size": 189.786254, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -710,7 +710,7 @@ class Swin_B_Weights(WeightsEnum): "acc@5": 96.640, } }, - "_ops": 15.430947, + "_ops": 15.4, "_weight_size": 335.363585, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -735,7 +735,7 @@ class Swin_V2_T_Weights(WeightsEnum): "acc@5": 96.132, } }, - "_ops": 5.939690, + "_ops": 5.9, "_weight_size": 108.625840, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -760,7 +760,7 @@ class Swin_V2_S_Weights(WeightsEnum): "acc@5": 96.816, } }, - "_ops": 11.545857, + "_ops": 11.5, "_weight_size": 190.674577, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, @@ -785,7 +785,7 @@ class Swin_V2_B_Weights(WeightsEnum): "acc@5": 96.864, } }, - "_ops": 20.325134, + "_ops": 20.3, "_weight_size": 336.371781, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py index e83970d61af..777a376c87e 100644 --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -127,7 +127,7 @@ class VGG11_Weights(WeightsEnum): "acc@5": 88.628, } }, - "_ops": 7.609090, + "_ops": 7.6, "_weight_size": 506.840077, }, ) @@ -147,7 +147,7 @@ class VGG11_BN_Weights(WeightsEnum): "acc@5": 89.810, } }, - "_ops": 7.609090, + "_ops": 7.6, "_weight_size": 506.881400, }, ) @@ -167,7 +167,7 @@ class VGG13_Weights(WeightsEnum): "acc@5": 89.246, } }, - "_ops": 11.308466, + "_ops": 11.3, "_weight_size": 507.545068, }, ) @@ -187,7 +187,7 @@ class VGG13_BN_Weights(WeightsEnum): "acc@5": 90.374, } }, - "_ops": 11.308466, + "_ops": 11.3, "_weight_size": 507.589627, }, ) @@ -207,7 +207,7 @@ class VGG16_Weights(WeightsEnum): "acc@5": 90.382, } }, - "_ops": 15.470264, + "_ops": 15.5, "_weight_size": 527.795678, }, ) @@ -231,7 +231,7 @@ class VGG16_Weights(WeightsEnum): "acc@5": float("nan"), } }, - "_ops": 15.470264, + "_ops": 15.5, "_weight_size": 527.801824, "_docs": """ These weights can't be used for classification because they are missing values in the `classifier` @@ -256,7 +256,7 @@ class VGG16_BN_Weights(WeightsEnum): "acc@5": 91.516, } }, - "_ops": 15.470264, + "_ops": 15.5, "_weight_size": 527.866207, }, ) @@ -276,7 +276,7 @@ class VGG19_Weights(WeightsEnum): "acc@5": 90.876, } }, - "_ops": 19.632062, + "_ops": 19.6, "_weight_size": 548.051225, }, ) @@ -296,7 +296,7 @@ class VGG19_BN_Weights(WeightsEnum): "acc@5": 
91.842, } }, - "_ops": 19.632062, + "_ops": 19.6, "_weight_size": 548.142819, }, ) diff --git a/torchvision/models/video/mvit.py b/torchvision/models/video/mvit.py index 4a2db21a997..df0429269e3 100644 --- a/torchvision/models/video/mvit.py +++ b/torchvision/models/video/mvit.py @@ -624,7 +624,7 @@ class MViT_V1_B_Weights(WeightsEnum): "acc@5": 93.582, } }, - "_ops": 70.599408, + "_ops": 70.6, "_weight_size": 139.764235, }, ) @@ -657,7 +657,7 @@ class MViT_V2_S_Weights(WeightsEnum): "acc@5": 94.665, } }, - "_ops": 64.223816, + "_ops": 64.2, "_weight_size": 131.883704, }, ) diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py index 022aa2b3da1..0e765da65ec 100644 --- a/torchvision/models/video/resnet.py +++ b/torchvision/models/video/resnet.py @@ -332,7 +332,7 @@ class R3D_18_Weights(WeightsEnum): "acc@5": 83.479, } }, - "_ops": 40.696553, + "_ops": 40.7, "_weight_size": 127.359406, }, ) @@ -352,7 +352,7 @@ class MC3_18_Weights(WeightsEnum): "acc@5": 84.130, } }, - "_ops": 43.342635, + "_ops": 43.3, "_weight_size": 44.671906, }, ) @@ -372,7 +372,7 @@ class R2Plus1D_18_Weights(WeightsEnum): "acc@5": 86.175, } }, - "_ops": 40.519081, + "_ops": 40.5, "_weight_size": 120.318409, }, ) diff --git a/torchvision/models/video/s3d.py b/torchvision/models/video/s3d.py index b0429e2b246..da8337024d2 100644 --- a/torchvision/models/video/s3d.py +++ b/torchvision/models/video/s3d.py @@ -175,7 +175,7 @@ class S3D_Weights(WeightsEnum): "acc@5": 88.050, } }, - "_ops": 17.978771, + "_ops": 18.0, "_weight_size": 31.971929, }, ) diff --git a/torchvision/models/vision_transformer.py b/torchvision/models/vision_transformer.py index 684755ef4c3..d77f90255b8 100644 --- a/torchvision/models/vision_transformer.py +++ b/torchvision/models/vision_transformer.py @@ -363,7 +363,7 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 95.318, } }, - "_ops": 17.563828, + "_ops": 17.6, "_weight_size": 330.284623, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT @@ -389,7 +389,7 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 97.650, } }, - "_ops": 55.484350, + "_ops": 55.5, "_weight_size": 331.397904, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -416,7 +416,7 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 96.180, } }, - "_ops": 17.563828, + "_ops": 17.6, "_weight_size": 330.284623, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -442,7 +442,7 @@ class ViT_B_32_Weights(WeightsEnum): "acc@5": 92.466, } }, - "_ops": 4.409186, + "_ops": 4.4, "_weight_size": 336.603959, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT @@ -468,7 +468,7 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 94.638, } }, - "_ops": 61.554713, + "_ops": 61.6, "_weight_size": 1161.023240, "_docs": """ These weights were trained from scratch by using a modified version of TorchVision's @@ -495,7 +495,7 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 98.512, } }, - "_ops": 361.986286, + "_ops": 362.0, "_weight_size": 1164.257615, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -522,7 +522,7 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 97.422, } }, - "_ops": 61.554713, + "_ops": 61.6, "_weight_size": 1161.023240, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk @@ -548,7 +548,7 @@ class ViT_L_32_Weights(WeightsEnum): "acc@5": 93.07, } }, - "_ops": 15.377539, + "_ops": 15.4, 
"_weight_size": 1169.448960, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT @@ -578,7 +578,7 @@ class ViT_H_14_Weights(WeightsEnum): "acc@5": 98.694, } }, - "_ops": 1016.716764, + "_ops": 1016.7, "_weight_size": 2416.643174, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original @@ -605,7 +605,7 @@ class ViT_H_14_Weights(WeightsEnum): "acc@5": 97.730, } }, - "_ops": 167.295109, + "_ops": 167.3, "_weight_size": 2411.208604, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk From e0c50ba5cd6318974c77c2b8f4c7318510199a20 Mon Sep 17 00:00:00 2001 From: Toni Blaslov Date: Fri, 11 Nov 2022 16:15:13 +0000 Subject: [PATCH 13/15] Rounding num ops and sizes to 3 decimals --- torchvision/models/alexnet.py | 4 +- torchvision/models/convnext.py | 16 +-- torchvision/models/densenet.py | 16 +-- torchvision/models/detection/faster_rcnn.py | 16 +-- torchvision/models/detection/fcos.py | 4 +- torchvision/models/detection/keypoint_rcnn.py | 8 +- torchvision/models/detection/mask_rcnn.py | 8 +- torchvision/models/detection/retinanet.py | 8 +- torchvision/models/detection/ssd.py | 4 +- torchvision/models/detection/ssdlite.py | 4 +- torchvision/models/efficientnet.py | 48 +++---- torchvision/models/googlenet.py | 4 +- torchvision/models/inception.py | 4 +- torchvision/models/maxvit.py | 4 +- torchvision/models/mnasnet.py | 16 +-- torchvision/models/mobilenetv2.py | 8 +- torchvision/models/mobilenetv3.py | 12 +- torchvision/models/optical_flow/raft.py | 32 ++--- torchvision/models/quantization/googlenet.py | 4 +- torchvision/models/quantization/inception.py | 4 +- .../models/quantization/mobilenetv2.py | 4 +- .../models/quantization/mobilenetv3.py | 4 +- torchvision/models/quantization/resnet.py | 24 ++-- .../models/quantization/shufflenetv2.py | 16 +-- torchvision/models/regnet.py | 132 +++++++++--------- torchvision/models/resnet.py | 68 ++++----- torchvision/models/segmentation/deeplabv3.py | 12 +- torchvision/models/segmentation/fcn.py | 8 +- torchvision/models/segmentation/lraspp.py | 4 +- torchvision/models/shufflenetv2.py | 16 +-- torchvision/models/squeezenet.py | 8 +- torchvision/models/swin_transformer.py | 24 ++-- torchvision/models/vgg.py | 36 ++--- torchvision/models/video/mvit.py | 8 +- torchvision/models/video/resnet.py | 12 +- torchvision/models/video/s3d.py | 4 +- torchvision/models/vision_transformer.py | 40 +++--- 37 files changed, 322 insertions(+), 322 deletions(-) diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py index 5ca3c2434b4..50179d07cf8 100644 --- a/torchvision/models/alexnet.py +++ b/torchvision/models/alexnet.py @@ -67,8 +67,8 @@ class AlexNet_Weights(WeightsEnum): "acc@5": 79.066, } }, - "_ops": 0.7, - "_weight_size": 233.086501, + "_ops": 0.714, + "_weight_size": 233.087, "_docs": """ These weights reproduce closely the results of the paper using a simplified training recipe. 
""", diff --git a/torchvision/models/convnext.py b/torchvision/models/convnext.py index d98a64de413..21e36b06335 100644 --- a/torchvision/models/convnext.py +++ b/torchvision/models/convnext.py @@ -219,8 +219,8 @@ class ConvNeXt_Tiny_Weights(WeightsEnum): "acc@5": 96.146, } }, - "_ops": 4.5, - "_weight_size": 109.118672, + "_ops": 4.456, + "_weight_size": 109.119, }, ) DEFAULT = IMAGENET1K_V1 @@ -239,8 +239,8 @@ class ConvNeXt_Small_Weights(WeightsEnum): "acc@5": 96.650, } }, - "_ops": 8.7, - "_weight_size": 191.702775, + "_ops": 8.684, + "_weight_size": 191.703, }, ) DEFAULT = IMAGENET1K_V1 @@ -259,8 +259,8 @@ class ConvNeXt_Base_Weights(WeightsEnum): "acc@5": 96.870, } }, - "_ops": 15.4, - "_weight_size": 338.064286, + "_ops": 15.355, + "_weight_size": 338.064, }, ) DEFAULT = IMAGENET1K_V1 @@ -279,8 +279,8 @@ class ConvNeXt_Large_Weights(WeightsEnum): "acc@5": 96.976, } }, - "_ops": 34.4, - "_weight_size": 754.537187, + "_ops": 34.361, + "_weight_size": 754.537, }, ) DEFAULT = IMAGENET1K_V1 diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py index b9b51253042..575d123c4e1 100644 --- a/torchvision/models/densenet.py +++ b/torchvision/models/densenet.py @@ -277,8 +277,8 @@ class DenseNet121_Weights(WeightsEnum): "acc@5": 91.972, } }, - "_ops": 2.8, - "_weight_size": 30.844645, + "_ops": 2.834, + "_weight_size": 30.845, }, ) DEFAULT = IMAGENET1K_V1 @@ -297,8 +297,8 @@ class DenseNet161_Weights(WeightsEnum): "acc@5": 93.560, } }, - "_ops": 7.7, - "_weight_size": 110.369482, + "_ops": 7.728, + "_weight_size": 110.369, }, ) DEFAULT = IMAGENET1K_V1 @@ -317,8 +317,8 @@ class DenseNet169_Weights(WeightsEnum): "acc@5": 92.806, } }, - "_ops": 3.4, - "_weight_size": 54.708029, + "_ops": 3.36, + "_weight_size": 54.708, }, ) DEFAULT = IMAGENET1K_V1 @@ -337,8 +337,8 @@ class DenseNet201_Weights(WeightsEnum): "acc@5": 93.370, } }, - "_ops": 4.3, - "_weight_size": 77.373247, + "_ops": 4.291, + "_weight_size": 77.373, }, ) DEFAULT = IMAGENET1K_V1 diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index d4434584989..5b97c8fc28a 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -388,8 +388,8 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum): "box_map": 37.0, } }, - "_ops": 134.4, - "_weight_size": 159.743153, + "_ops": 134.38, + "_weight_size": 159.743, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) @@ -409,8 +409,8 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): "box_map": 46.7, } }, - "_ops": 280.4, - "_weight_size": 167.104394, + "_ops": 280.371, + "_weight_size": 167.104, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, ) @@ -430,8 +430,8 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum): "box_map": 32.8, } }, - "_ops": 4.5, - "_weight_size": 74.238593, + "_ops": 4.494, + "_weight_size": 74.239, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) @@ -451,8 +451,8 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum): "box_map": 22.8, } }, - "_ops": 0.7, - "_weight_size": 74.238593, + "_ops": 0.719, + "_weight_size": 74.239, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 319fe66465e..535518f821c 100644 --- 
a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -662,8 +662,8 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum): "box_map": 39.2, } }, - "_ops": 128.2, - "_weight_size": 123.607730, + "_ops": 128.207, + "_weight_size": 123.608, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py index 82ce8430745..6964389f190 100644 --- a/torchvision/models/detection/keypoint_rcnn.py +++ b/torchvision/models/detection/keypoint_rcnn.py @@ -328,8 +328,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "kp_map": 61.1, } }, - "_ops": 133.9, - "_weight_size": 226.053994, + "_ops": 133.924, + "_weight_size": 226.054, "_docs": """ These weights were produced by following a similar training recipe as on the paper but use a checkpoint from an early epoch. @@ -349,8 +349,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum): "kp_map": 65.0, } }, - "_ops": 137.4, - "_weight_size": 226.053994, + "_ops": 137.42, + "_weight_size": 226.054, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) diff --git a/torchvision/models/detection/mask_rcnn.py b/torchvision/models/detection/mask_rcnn.py index af1355ed432..f8a13a6587b 100644 --- a/torchvision/models/detection/mask_rcnn.py +++ b/torchvision/models/detection/mask_rcnn.py @@ -370,8 +370,8 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum): "mask_map": 34.6, } }, - "_ops": 134.4, - "_weight_size": 169.839934, + "_ops": 134.38, + "_weight_size": 169.84, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) @@ -392,8 +392,8 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum): "mask_map": 41.8, } }, - "_ops": 333.6, - "_weight_size": 177.219453, + "_ops": 333.577, + "_weight_size": 177.219, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, ) diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index b87c55d65fe..498ff22071f 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -690,8 +690,8 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum): "box_map": 36.4, } }, - "_ops": 151.5, - "_weight_size": 130.267216, + "_ops": 151.54, + "_weight_size": 130.267, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) @@ -711,8 +711,8 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum): "box_map": 41.5, } }, - "_ops": 152.2, - "_weight_size": 146.037091, + "_ops": 152.238, + "_weight_size": 146.037, "_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""", }, ) diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 9d570bc2f5d..5ec27f45fc4 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -39,8 +39,8 @@ class SSD300_VGG16_Weights(WeightsEnum): "box_map": 25.1, } }, - "_ops": 34.9, - "_weight_size": 135.988447, + "_ops": 34.858, + "_weight_size": 135.988, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py index e7876ebf1cb..10e32d248ce 100644 --- a/torchvision/models/detection/ssdlite.py +++ 
b/torchvision/models/detection/ssdlite.py @@ -198,8 +198,8 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum): "box_map": 21.3, } }, - "_ops": 0.6, - "_weight_size": 13.417583, + "_ops": 0.583, + "_weight_size": 13.418, "_docs": """These weights were produced by following a similar training recipe as on the paper.""", }, ) diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py index 88bbd5843a2..05414c93150 100644 --- a/torchvision/models/efficientnet.py +++ b/torchvision/models/efficientnet.py @@ -464,8 +464,8 @@ class EfficientNet_B0_Weights(WeightsEnum): "acc@5": 93.532, } }, - "_ops": 0.4, - "_weight_size": 20.450974, + "_ops": 0.386, + "_weight_size": 20.451, "_docs": """These weights are ported from the original paper.""", }, ) @@ -488,8 +488,8 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@5": 94.186, } }, - "_ops": 0.7, - "_weight_size": 30.133798, + "_ops": 0.687, + "_weight_size": 30.134, "_docs": """These weights are ported from the original paper.""", }, ) @@ -508,8 +508,8 @@ class EfficientNet_B1_Weights(WeightsEnum): "acc@5": 94.934, } }, - "_ops": 0.7, - "_weight_size": 30.136422, + "_ops": 0.687, + "_weight_size": 30.136, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -536,8 +536,8 @@ class EfficientNet_B2_Weights(WeightsEnum): "acc@5": 95.310, } }, - "_ops": 1.1, - "_weight_size": 35.173593, + "_ops": 1.088, + "_weight_size": 35.174, "_docs": """These weights are ported from the original paper.""", }, ) @@ -560,8 +560,8 @@ class EfficientNet_B3_Weights(WeightsEnum): "acc@5": 96.054, } }, - "_ops": 1.8, - "_weight_size": 47.183904, + "_ops": 1.827, + "_weight_size": 47.184, "_docs": """These weights are ported from the original paper.""", }, ) @@ -584,8 +584,8 @@ class EfficientNet_B4_Weights(WeightsEnum): "acc@5": 96.594, } }, - "_ops": 4.4, - "_weight_size": 74.489011, + "_ops": 4.394, + "_weight_size": 74.489, "_docs": """These weights are ported from the original paper.""", }, ) @@ -608,8 +608,8 @@ class EfficientNet_B5_Weights(WeightsEnum): "acc@5": 96.628, } }, - "_ops": 10.3, - "_weight_size": 116.863912, + "_ops": 10.266, + "_weight_size": 116.864, "_docs": """These weights are ported from the original paper.""", }, ) @@ -632,8 +632,8 @@ class EfficientNet_B6_Weights(WeightsEnum): "acc@5": 96.916, } }, - "_ops": 19.1, - "_weight_size": 165.361524, + "_ops": 19.068, + "_weight_size": 165.362, "_docs": """These weights are ported from the original paper.""", }, ) @@ -656,8 +656,8 @@ class EfficientNet_B7_Weights(WeightsEnum): "acc@5": 96.908, } }, - "_ops": 37.7, - "_weight_size": 254.675393, + "_ops": 37.746, + "_weight_size": 254.675, "_docs": """These weights are ported from the original paper.""", }, ) @@ -682,8 +682,8 @@ class EfficientNet_V2_S_Weights(WeightsEnum): "acc@5": 96.878, } }, - "_ops": 8.4, - "_weight_size": 82.703832, + "_ops": 8.366, + "_weight_size": 82.704, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -712,8 +712,8 @@ class EfficientNet_V2_M_Weights(WeightsEnum): "acc@5": 97.156, } }, - "_ops": 24.6, - "_weight_size": 208.010186, + "_ops": 24.582, + "_weight_size": 208.01, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -745,8 +745,8 @@ class EfficientNet_V2_L_Weights(WeightsEnum): "acc@5": 97.788, } }, - "_ops": 56.1, - 
"_weight_size": 454.573071, + "_ops": 56.08, + "_weight_size": 454.573, "_docs": """These weights are ported from the original paper.""", }, ) diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py index f467c0a4b3e..b5435c7bda4 100644 --- a/torchvision/models/googlenet.py +++ b/torchvision/models/googlenet.py @@ -290,8 +290,8 @@ class GoogLeNet_Weights(WeightsEnum): "acc@5": 89.530, } }, - "_ops": 1.5, - "_weight_size": 49.731288, + "_ops": 1.498, + "_weight_size": 49.731, "_docs": """These weights are ported from the original paper.""", }, ) diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py index 0e11fb803f8..d2adb0842d7 100644 --- a/torchvision/models/inception.py +++ b/torchvision/models/inception.py @@ -422,8 +422,8 @@ class Inception_V3_Weights(WeightsEnum): "acc@5": 93.450, } }, - "_ops": 5.7, - "_weight_size": 103.902575, + "_ops": 5.713, + "_weight_size": 103.903, "_docs": """These weights are ported from the original paper.""", }, ) diff --git a/torchvision/models/maxvit.py b/torchvision/models/maxvit.py index bf1f9a01c6f..96c39513278 100644 --- a/torchvision/models/maxvit.py +++ b/torchvision/models/maxvit.py @@ -785,8 +785,8 @@ class MaxVit_T_Weights(WeightsEnum): "acc@5": 96.722, } }, - "_ops": 5.6, - "_weight_size": 118.769322, + "_ops": 5.558, + "_weight_size": 118.769, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py index 83b14070d73..bf94b9630f6 100644 --- a/torchvision/models/mnasnet.py +++ b/torchvision/models/mnasnet.py @@ -231,8 +231,8 @@ class MNASNet0_5_Weights(WeightsEnum): "acc@5": 87.490, } }, - "_ops": 0.1, - "_weight_size": 8.591165, + "_ops": 0.104, + "_weight_size": 8.591, "_docs": """These weights reproduce closely the results of the paper.""", }, ) @@ -253,8 +253,8 @@ class MNASNet0_75_Weights(WeightsEnum): "acc@5": 90.496, } }, - "_ops": 0.2, - "_weight_size": 12.302564, + "_ops": 0.215, + "_weight_size": 12.303, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe `_. @@ -277,8 +277,8 @@ class MNASNet1_0_Weights(WeightsEnum): "acc@5": 91.510, } }, - "_ops": 0.3, - "_weight_size": 16.915318, + "_ops": 0.314, + "_weight_size": 16.915, "_docs": """These weights reproduce closely the results of the paper.""", }, ) @@ -299,8 +299,8 @@ class MNASNet1_3_Weights(WeightsEnum): "acc@5": 93.522, } }, - "_ops": 0.5, - "_weight_size": 24.245618, + "_ops": 0.526, + "_weight_size": 24.246, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe `_. 
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index 7cde8db6513..7920b906c57 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -194,8 +194,8 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@5": 90.286, } }, - "_ops": 0.3, - "_weight_size": 13.554546, + "_ops": 0.301, + "_weight_size": 13.555, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -211,8 +211,8 @@ class MobileNet_V2_Weights(WeightsEnum): "acc@5": 90.822, } }, - "_ops": 0.3, - "_weight_size": 13.598035, + "_ops": 0.301, + "_weight_size": 13.598, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py index 07ae7c2e6f6..8ae4b7d602d 100644 --- a/torchvision/models/mobilenetv3.py +++ b/torchvision/models/mobilenetv3.py @@ -307,8 +307,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@5": 91.340, } }, - "_ops": 0.2, - "_weight_size": 21.113799, + "_ops": 0.217, + "_weight_size": 21.114, "_docs": """These weights were trained from scratch by using a simple training recipe.""", }, ) @@ -325,8 +325,8 @@ class MobileNet_V3_Large_Weights(WeightsEnum): "acc@5": 92.566, } }, - "_ops": 0.2, - "_weight_size": 21.106828, + "_ops": 0.217, + "_weight_size": 21.107, "_docs": """ These weights improve marginally upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -351,8 +351,8 @@ class MobileNet_V3_Small_Weights(WeightsEnum): "acc@5": 87.402, } }, - "_ops": 0.1, - "_weight_size": 9.829093, + "_ops": 0.057, + "_weight_size": 9.829, "_docs": """ These weights improve upon the results of the original paper by using a simple training recipe. """, diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py index a776735b6af..7aa4684929a 100644 --- a/torchvision/models/optical_flow/raft.py +++ b/torchvision/models/optical_flow/raft.py @@ -552,8 +552,8 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 2.7894}, "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506}, }, - "_ops": 211.0, - "_weight_size": 20.128829, + "_ops": 211.007, + "_weight_size": 20.129, "_docs": """These weights were ported from the original paper. They are trained on :class:`~torchvision.datasets.FlyingChairs` + :class:`~torchvision.datasets.FlyingThings3D`.""", @@ -572,8 +572,8 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 2.7161}, "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679}, }, - "_ops": 211.0, - "_weight_size": 20.128829, + "_ops": 211.007, + "_weight_size": 20.129, "_docs": """These weights were trained from scratch on :class:`~torchvision.datasets.FlyingChairs` + :class:`~torchvision.datasets.FlyingThings3D`.""", @@ -592,8 +592,8 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Test-Cleanpass": {"epe": 1.94}, "Sintel-Test-Finalpass": {"epe": 3.18}, }, - "_ops": 211.0, - "_weight_size": 20.128829, + "_ops": 211.007, + "_weight_size": 20.129, "_docs": """ These weights were ported from the original paper. 
They are trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -618,8 +618,8 @@ class Raft_Large_Weights(WeightsEnum): "Sintel-Test-Cleanpass": {"epe": 1.819}, "Sintel-Test-Finalpass": {"epe": 3.067}, }, - "_ops": 211.0, - "_weight_size": 20.128829, + "_ops": 211.007, + "_weight_size": 20.129, "_docs": """ These weights were trained from scratch. They are pre-trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -644,8 +644,8 @@ class Raft_Large_Weights(WeightsEnum): "_metrics": { "Kitti-Test": {"fl_all": 5.10}, }, - "_ops": 211.0, - "_weight_size": 20.128829, + "_ops": 211.007, + "_weight_size": 20.129, "_docs": """ These weights were ported from the original paper. They are pre-trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -667,8 +667,8 @@ class Raft_Large_Weights(WeightsEnum): "_metrics": { "Kitti-Test": {"fl_all": 5.19}, }, - "_ops": 211.0, - "_weight_size": 20.128829, + "_ops": 211.007, + "_weight_size": 20.129, "_docs": """ These weights were trained from scratch. They are pre-trained on :class:`~torchvision.datasets.FlyingChairs` + @@ -710,8 +710,8 @@ class Raft_Small_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 3.2790}, "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801}, }, - "_ops": 47.7, - "_weight_size": 3.820600, + "_ops": 47.655, + "_weight_size": 3.821, "_docs": """These weights were ported from the original paper. They are trained on :class:`~torchvision.datasets.FlyingChairs` + :class:`~torchvision.datasets.FlyingThings3D`.""", @@ -729,8 +729,8 @@ class Raft_Small_Weights(WeightsEnum): "Sintel-Train-Finalpass": {"epe": 3.2831}, "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369}, }, - "_ops": 47.7, - "_weight_size": 3.820600, + "_ops": 47.655, + "_weight_size": 3.821, "_docs": """These weights were trained from scratch on :class:`~torchvision.datasets.FlyingChairs` + :class:`~torchvision.datasets.FlyingThings3D`.""", diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py index 46c0ee400f3..96d3fc2613b 100644 --- a/torchvision/models/quantization/googlenet.py +++ b/torchvision/models/quantization/googlenet.py @@ -123,8 +123,8 @@ class GoogLeNet_QuantizedWeights(WeightsEnum): "acc@5": 89.404, } }, - "_ops": 1.5, - "_weight_size": 12.617729, + "_ops": 1.498, + "_weight_size": 12.618, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized weights listed below. diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py index e789c8ed3ea..bb6b5aa5b47 100644 --- a/torchvision/models/quantization/inception.py +++ b/torchvision/models/quantization/inception.py @@ -183,8 +183,8 @@ class Inception_V3_QuantizedWeights(WeightsEnum): "acc@5": 93.354, } }, - "_ops": 5.7, - "_weight_size": 23.145652, + "_ops": 5.713, + "_weight_size": 23.146, "_docs": """ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized weights listed below. 
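Note that the quantized hunks above record the same operation counts as their float counterparts (1.498 GOPs for GoogLeNet, 5.713 for Inception V3): quantization changes the arithmetic, not the number of operations. For the float models, figures in this ballpark can be reproduced with an off-the-shelf counter. A minimal sketch using fvcore's FlopCountAnalysis — an assumption, since the patch does not say which counter produced these numbers:

    import torch
    from fvcore.nn import FlopCountAnalysis
    from torchvision.models import inception_v3, Inception_V3_Weights

    # Count ops for a single 299x299 crop, the eval resolution Inception V3
    # expects, then convert to giga-ops with three decimals as in the hunks.
    model = inception_v3(weights=Inception_V3_Weights.IMAGENET1K_V1).eval()
    flops = FlopCountAnalysis(model, torch.randn(1, 3, 299, 299))
    print(f"{flops.total() / 1e9:.3f}")  # should land near the 5.713 above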
diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py index fc21fc6a51f..c85c54f33c7 100644 --- a/torchvision/models/quantization/mobilenetv2.py +++ b/torchvision/models/quantization/mobilenetv2.py @@ -80,8 +80,8 @@ class MobileNet_V2_QuantizedWeights(WeightsEnum): "acc@5": 90.150, } }, - "_ops": 0.3, - "_weight_size": 3.422719, + "_ops": 0.301, + "_weight_size": 3.423, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized weights listed below. diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py index 546a3981621..c0a613eddb7 100644 --- a/torchvision/models/quantization/mobilenetv3.py +++ b/torchvision/models/quantization/mobilenetv3.py @@ -175,8 +175,8 @@ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum): "acc@5": 90.858, } }, - "_ops": 0.2, - "_weight_size": 21.553815, + "_ops": 0.217, + "_weight_size": 21.554, "_docs": """ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized weights listed below. diff --git a/torchvision/models/quantization/resnet.py b/torchvision/models/quantization/resnet.py index b858a402ede..b6f4165e654 100644 --- a/torchvision/models/quantization/resnet.py +++ b/torchvision/models/quantization/resnet.py @@ -175,8 +175,8 @@ class ResNet18_QuantizedWeights(WeightsEnum): "acc@5": 88.882, } }, - "_ops": 1.8, - "_weight_size": 11.238080, + "_ops": 1.814, + "_weight_size": 11.238, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -196,8 +196,8 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 92.814, } }, - "_ops": 4.1, - "_weight_size": 24.758719, + "_ops": 4.089, + "_weight_size": 24.759, }, ) IMAGENET1K_FBGEMM_V2 = Weights( @@ -213,8 +213,8 @@ class ResNet50_QuantizedWeights(WeightsEnum): "acc@5": 94.976, } }, - "_ops": 4.1, - "_weight_size": 24.953237, + "_ops": 4.089, + "_weight_size": 24.953, }, ) DEFAULT = IMAGENET1K_FBGEMM_V2 @@ -234,8 +234,8 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 94.480, } }, - "_ops": 16.4, - "_weight_size": 86.034273, + "_ops": 16.414, + "_weight_size": 86.034, }, ) IMAGENET1K_FBGEMM_V2 = Weights( @@ -251,8 +251,8 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): "acc@5": 96.132, } }, - "_ops": 16.4, - "_weight_size": 86.645276, + "_ops": 16.414, + "_weight_size": 86.645, }, ) DEFAULT = IMAGENET1K_FBGEMM_V2 @@ -273,8 +273,8 @@ class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum): "acc@5": 96.326, } }, - "_ops": 15.5, - "_weight_size": 81.556409, + "_ops": 15.46, + "_weight_size": 81.556, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py index 574c0918546..11d89c0b32d 100644 --- a/torchvision/models/quantization/shufflenetv2.py +++ b/torchvision/models/quantization/shufflenetv2.py @@ -139,8 +139,8 @@ class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum): "acc@5": 79.780, } }, - "_ops": 0.0, - "_weight_size": 1.500871, + "_ops": 0.04, + "_weight_size": 1.501, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -160,8 +160,8 @@ class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum): "acc@5": 87.582, } }, - "_ops": 0.1, - "_weight_size": 2.333858, + "_ops": 0.145, + "_weight_size": 2.334, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -182,8 +182,8 @@ class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum): "acc@5": 90.700, } }, - "_ops": 0.3, - "_weight_size": 3.671769, + "_ops": 0.296, + "_weight_size": 3.672, }, 
) DEFAULT = IMAGENET1K_FBGEMM_V1 @@ -204,8 +204,8 @@ class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum): "acc@5": 92.488, } }, - "_ops": 0.6, - "_weight_size": 7.467118, + "_ops": 0.583, + "_weight_size": 7.467, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py index aa89f4715c2..a60262c3b34 100644 --- a/torchvision/models/regnet.py +++ b/torchvision/models/regnet.py @@ -428,8 +428,8 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@5": 91.716, } }, - "_ops": 0.4, - "_weight_size": 16.805909, + "_ops": 0.402, + "_weight_size": 16.806, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -446,8 +446,8 @@ class RegNet_Y_400MF_Weights(WeightsEnum): "acc@5": 92.742, } }, - "_ops": 0.4, - "_weight_size": 16.805909, + "_ops": 0.402, + "_weight_size": 16.806, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -472,8 +472,8 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@5": 93.136, } }, - "_ops": 0.8, - "_weight_size": 24.773644, + "_ops": 0.834, + "_weight_size": 24.774, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -490,8 +490,8 @@ class RegNet_Y_800MF_Weights(WeightsEnum): "acc@5": 94.502, } }, - "_ops": 0.8, - "_weight_size": 24.773522, + "_ops": 0.834, + "_weight_size": 24.774, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -516,8 +516,8 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@5": 93.966, } }, - "_ops": 1.6, - "_weight_size": 43.152310, + "_ops": 1.612, + "_weight_size": 43.152, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -534,8 +534,8 @@ class RegNet_Y_1_6GF_Weights(WeightsEnum): "acc@5": 95.444, } }, - "_ops": 1.6, - "_weight_size": 43.152310, + "_ops": 1.612, + "_weight_size": 43.152, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -560,8 +560,8 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@5": 94.576, } }, - "_ops": 3.2, - "_weight_size": 74.566991, + "_ops": 3.176, + "_weight_size": 74.567, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -578,8 +578,8 @@ class RegNet_Y_3_2GF_Weights(WeightsEnum): "acc@5": 95.972, } }, - "_ops": 3.2, - "_weight_size": 74.566991, + "_ops": 3.176, + "_weight_size": 74.567, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -604,8 +604,8 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@5": 95.048, } }, - "_ops": 8.5, - "_weight_size": 150.701436, + "_ops": 8.473, + "_weight_size": 150.701, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -622,8 +622,8 @@ class RegNet_Y_8GF_Weights(WeightsEnum): "acc@5": 96.330, } }, - "_ops": 8.5, - "_weight_size": 150.701436, + "_ops": 8.473, + "_weight_size": 150.701, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -648,8 +648,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 95.240, } }, - "_ops": 15.9, - "_weight_size": 319.490335, + "_ops": 15.912, + 
"_weight_size": 319.49, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -666,8 +666,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 96.328, } }, - "_ops": 15.9, - "_weight_size": 319.490335, + "_ops": 15.912, + "_weight_size": 319.49, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -689,8 +689,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 98.054, } }, - "_ops": 46.7, - "_weight_size": 319.490335, + "_ops": 46.735, + "_weight_size": 319.49, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original `SWAG `_ weights on ImageNet-1K data. @@ -712,8 +712,8 @@ class RegNet_Y_16GF_Weights(WeightsEnum): "acc@5": 97.244, } }, - "_ops": 15.9, - "_weight_size": 319.490335, + "_ops": 15.912, + "_weight_size": 319.49, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk weights and a linear classifier learnt on top of them trained on ImageNet-1K data. @@ -737,8 +737,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 95.340, } }, - "_ops": 32.3, - "_weight_size": 554.076371, + "_ops": 32.28, + "_weight_size": 554.076, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -755,8 +755,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 96.498, } }, - "_ops": 32.3, - "_weight_size": 554.076371, + "_ops": 32.28, + "_weight_size": 554.076, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -778,8 +778,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 98.362, } }, - "_ops": 94.8, - "_weight_size": 554.076371, + "_ops": 94.826, + "_weight_size": 554.076, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original `SWAG `_ weights on ImageNet-1K data. @@ -801,8 +801,8 @@ class RegNet_Y_32GF_Weights(WeightsEnum): "acc@5": 97.480, } }, - "_ops": 32.3, - "_weight_size": 554.076371, + "_ops": 32.28, + "_weight_size": 554.076, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk weights and a linear classifier learnt on top of them trained on ImageNet-1K data. @@ -827,8 +827,8 @@ class RegNet_Y_128GF_Weights(WeightsEnum): "acc@5": 98.682, } }, - "_ops": 374.6, - "_weight_size": 2461.563993, + "_ops": 374.57, + "_weight_size": 2461.564, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original `SWAG `_ weights on ImageNet-1K data. @@ -850,8 +850,8 @@ class RegNet_Y_128GF_Weights(WeightsEnum): "acc@5": 97.844, } }, - "_ops": 127.5, - "_weight_size": 2461.563993, + "_ops": 127.518, + "_weight_size": 2461.564, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk weights and a linear classifier learnt on top of them trained on ImageNet-1K data. 
@@ -875,8 +875,8 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@5": 90.950, } }, - "_ops": 0.4, - "_weight_size": 21.258035, + "_ops": 0.414, + "_weight_size": 21.258, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -893,8 +893,8 @@ class RegNet_X_400MF_Weights(WeightsEnum): "acc@5": 92.322, } }, - "_ops": 0.4, - "_weight_size": 21.256570, + "_ops": 0.414, + "_weight_size": 21.257, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -920,7 +920,7 @@ class RegNet_X_800MF_Weights(WeightsEnum): } }, "_ops": 0.8, - "_weight_size": 27.945130, + "_weight_size": 27.945, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -938,7 +938,7 @@ class RegNet_X_800MF_Weights(WeightsEnum): } }, "_ops": 0.8, - "_weight_size": 27.945130, + "_weight_size": 27.945, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -963,8 +963,8 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@5": 93.440, } }, - "_ops": 1.6, - "_weight_size": 35.339471, + "_ops": 1.603, + "_weight_size": 35.339, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -981,8 +981,8 @@ class RegNet_X_1_6GF_Weights(WeightsEnum): "acc@5": 94.922, } }, - "_ops": 1.6, - "_weight_size": 35.339471, + "_ops": 1.603, + "_weight_size": 35.339, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -1007,8 +1007,8 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@5": 93.992, } }, - "_ops": 3.2, - "_weight_size": 58.755979, + "_ops": 3.177, + "_weight_size": 58.756, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -1025,8 +1025,8 @@ class RegNet_X_3_2GF_Weights(WeightsEnum): "acc@5": 95.430, } }, - "_ops": 3.2, - "_weight_size": 58.755979, + "_ops": 3.177, + "_weight_size": 58.756, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -1051,8 +1051,8 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@5": 94.686, } }, - "_ops": 8.0, - "_weight_size": 151.455937, + "_ops": 7.995, + "_weight_size": 151.456, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -1069,8 +1069,8 @@ class RegNet_X_8GF_Weights(WeightsEnum): "acc@5": 95.678, } }, - "_ops": 8.0, - "_weight_size": 151.455937, + "_ops": 7.995, + "_weight_size": 151.456, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -1095,8 +1095,8 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@5": 94.944, } }, - "_ops": 15.9, - "_weight_size": 207.627419, + "_ops": 15.941, + "_weight_size": 207.627, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -1113,8 +1113,8 @@ class RegNet_X_16GF_Weights(WeightsEnum): "acc@5": 96.196, } }, - "_ops": 15.9, - "_weight_size": 207.627419, + "_ops": 15.941, + "_weight_size": 207.627, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe @@ -1139,8 +1139,8 @@ class 
RegNet_X_32GF_Weights(WeightsEnum): "acc@5": 95.248, } }, - "_ops": 31.7, - "_weight_size": 412.039433, + "_ops": 31.736, + "_weight_size": 412.039, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -1157,8 +1157,8 @@ class RegNet_X_32GF_Weights(WeightsEnum): "acc@5": 96.288, } }, - "_ops": 31.7, - "_weight_size": 412.039433, + "_ops": 31.736, + "_weight_size": 412.039, "_docs": """ These weights improve upon the results of the original paper by using a modified version of TorchVision's `new training recipe diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py index 56c41418eec..80eb4666907 100644 --- a/torchvision/models/resnet.py +++ b/torchvision/models/resnet.py @@ -323,8 +323,8 @@ class ResNet18_Weights(WeightsEnum): "acc@5": 89.078, } }, - "_ops": 1.8, - "_weight_size": 44.661113, + "_ops": 1.814, + "_weight_size": 44.661, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -345,8 +345,8 @@ class ResNet34_Weights(WeightsEnum): "acc@5": 91.420, } }, - "_ops": 3.7, - "_weight_size": 83.274669, + "_ops": 3.664, + "_weight_size": 83.275, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -367,8 +367,8 @@ class ResNet50_Weights(WeightsEnum): "acc@5": 92.862, } }, - "_ops": 4.1, - "_weight_size": 97.780545, + "_ops": 4.089, + "_weight_size": 97.781, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -385,8 +385,8 @@ class ResNet50_Weights(WeightsEnum): "acc@5": 95.434, } }, - "_ops": 4.1, - "_weight_size": 97.790162, + "_ops": 4.089, + "_weight_size": 97.79, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. @@ -410,8 +410,8 @@ class ResNet101_Weights(WeightsEnum): "acc@5": 93.546, } }, - "_ops": 7.8, - "_weight_size": 170.511188, + "_ops": 7.801, + "_weight_size": 170.511, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -428,8 +428,8 @@ class ResNet101_Weights(WeightsEnum): "acc@5": 95.780, } }, - "_ops": 7.8, - "_weight_size": 170.530362, + "_ops": 7.801, + "_weight_size": 170.53, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. @@ -453,8 +453,8 @@ class ResNet152_Weights(WeightsEnum): "acc@5": 94.046, } }, - "_ops": 11.5, - "_weight_size": 230.434152, + "_ops": 11.514, + "_weight_size": 230.434, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -471,8 +471,8 @@ class ResNet152_Weights(WeightsEnum): "acc@5": 96.002, } }, - "_ops": 11.5, - "_weight_size": 230.473687, + "_ops": 11.514, + "_weight_size": 230.474, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. 
@@ -496,8 +496,8 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@5": 93.698, } }, - "_ops": 4.2, - "_weight_size": 95.788646, + "_ops": 4.23, + "_weight_size": 95.789, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -514,8 +514,8 @@ class ResNeXt50_32X4D_Weights(WeightsEnum): "acc@5": 95.340, } }, - "_ops": 4.2, - "_weight_size": 95.833192, + "_ops": 4.23, + "_weight_size": 95.833, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. @@ -539,8 +539,8 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@5": 94.526, } }, - "_ops": 16.4, - "_weight_size": 339.586349, + "_ops": 16.414, + "_weight_size": 339.586, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -557,8 +557,8 @@ class ResNeXt101_32X8D_Weights(WeightsEnum): "acc@5": 96.228, } }, - "_ops": 16.4, - "_weight_size": 339.673062, + "_ops": 16.414, + "_weight_size": 339.673, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. @@ -582,8 +582,8 @@ class ResNeXt101_64X4D_Weights(WeightsEnum): "acc@5": 96.454, } }, - "_ops": 15.5, - "_weight_size": 319.317594, + "_ops": 15.46, + "_weight_size": 319.318, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe `_. @@ -607,8 +607,8 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@5": 94.086, } }, - "_ops": 11.4, - "_weight_size": 131.820194, + "_ops": 11.398, + "_weight_size": 131.82, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -625,8 +625,8 @@ class Wide_ResNet50_2_Weights(WeightsEnum): "acc@5": 95.758, } }, - "_ops": 11.4, - "_weight_size": 263.124207, + "_ops": 11.398, + "_weight_size": 263.124, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. @@ -650,8 +650,8 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@5": 94.284, } }, - "_ops": 22.8, - "_weight_size": 242.896219, + "_ops": 22.753, + "_weight_size": 242.896, "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""", }, ) @@ -668,8 +668,8 @@ class Wide_ResNet101_2_Weights(WeightsEnum): "acc@5": 96.020, } }, - "_ops": 22.8, - "_weight_size": 484.747281, + "_ops": 22.753, + "_weight_size": 484.747, "_docs": """ These weights improve upon the results of the original paper by using TorchVision's `new training recipe `_. 
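The "_weight_size" edits throughout trim the checkpoint file size in MB from six decimals (e.g. 339.673062) to three. A sketch of how such a figure can be checked locally, assuming the 1024-based MB convention and the standard torch.hub cache layout (the patch does not spell out either):

    import os
    import torch
    from torchvision.models import ResNet50_Weights

    weights = ResNet50_Weights.IMAGENET1K_V2
    # Download (or reuse) the checkpoint in the torch.hub cache, then report
    # its on-disk size in MB, rounded the way the metadata above rounds it.
    torch.hub.load_state_dict_from_url(weights.url, progress=False)
    path = os.path.join(torch.hub.get_dir(), "checkpoints",
                        os.path.basename(weights.url))
    print(round(os.path.getsize(path) / 1024 / 1024, 3))  # 97.79 per the resnet hunk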
diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py index f1a2cc4200d..ce586738089 100644 --- a/torchvision/models/segmentation/deeplabv3.py +++ b/torchvision/models/segmentation/deeplabv3.py @@ -152,8 +152,8 @@ class DeepLabV3_ResNet50_Weights(WeightsEnum): "pixel_acc": 92.4, } }, - "_ops": 178.7, - "_weight_size": 160.514977, + "_ops": 178.722, + "_weight_size": 160.515, }, ) DEFAULT = COCO_WITH_VOC_LABELS_V1 @@ -173,8 +173,8 @@ class DeepLabV3_ResNet101_Weights(WeightsEnum): "pixel_acc": 92.4, } }, - "_ops": 258.7, - "_weight_size": 233.216800, + "_ops": 258.743, + "_weight_size": 233.217, }, ) DEFAULT = COCO_WITH_VOC_LABELS_V1 @@ -194,8 +194,8 @@ class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum): "pixel_acc": 91.2, } }, - "_ops": 10.5, - "_weight_size": 42.301330, + "_ops": 10.452, + "_weight_size": 42.301, }, ) DEFAULT = COCO_WITH_VOC_LABELS_V1 diff --git a/torchvision/models/segmentation/fcn.py b/torchvision/models/segmentation/fcn.py index 1a4dffd24b7..8031a7d0d30 100644 --- a/torchvision/models/segmentation/fcn.py +++ b/torchvision/models/segmentation/fcn.py @@ -71,8 +71,8 @@ class FCN_ResNet50_Weights(WeightsEnum): "pixel_acc": 91.4, } }, - "_ops": 152.7, - "_weight_size": 135.009211, + "_ops": 152.717, + "_weight_size": 135.009, }, ) DEFAULT = COCO_WITH_VOC_LABELS_V1 @@ -92,8 +92,8 @@ class FCN_ResNet101_Weights(WeightsEnum): "pixel_acc": 91.9, } }, - "_ops": 232.7, - "_weight_size": 207.711034, + "_ops": 232.738, + "_weight_size": 207.711, }, ) DEFAULT = COCO_WITH_VOC_LABELS_V1 diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py index 551053bc63d..e90a2917d40 100644 --- a/torchvision/models/segmentation/lraspp.py +++ b/torchvision/models/segmentation/lraspp.py @@ -108,8 +108,8 @@ class LRASPP_MobileNet_V3_Large_Weights(WeightsEnum): "pixel_acc": 91.2, } }, - "_ops": 2.1, - "_weight_size": 12.490331, + "_ops": 2.086, + "_weight_size": 12.49, "_docs": """ These weights were trained on a subset of COCO, using only the 20 categories that are present in the Pascal VOC dataset. diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py index b61cc2560bd..005b338b7e6 100644 --- a/torchvision/models/shufflenetv2.py +++ b/torchvision/models/shufflenetv2.py @@ -204,8 +204,8 @@ class ShuffleNet_V2_X0_5_Weights(WeightsEnum): "acc@5": 81.746, } }, - "_ops": 0.0, - "_weight_size": 5.281570, + "_ops": 0.04, + "_weight_size": 5.282, "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""", }, ) @@ -226,8 +226,8 @@ class ShuffleNet_V2_X1_0_Weights(WeightsEnum): "acc@5": 88.316, } }, - "_ops": 0.1, - "_weight_size": 8.791250, + "_ops": 0.145, + "_weight_size": 8.791, "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""", }, ) @@ -248,8 +248,8 @@ class ShuffleNet_V2_X1_5_Weights(WeightsEnum): "acc@5": 91.086, } }, - "_ops": 0.3, - "_weight_size": 13.557034, + "_ops": 0.296, + "_weight_size": 13.557, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe `_. @@ -273,8 +273,8 @@ class ShuffleNet_V2_X2_0_Weights(WeightsEnum): "acc@5": 93.006, } }, - "_ops": 0.6, - "_weight_size": 28.432767, + "_ops": 0.583, + "_weight_size": 28.433, "_docs": """ These weights were trained from scratch by using TorchVision's `new training recipe `_. 
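Once merged, the new fields ride along in each weight enum's meta dict next to num_params and _metrics. The underscore prefix marks them as private, so the keys below are current as of this patch but not a stable API:

    from torchvision.models import ShuffleNet_V2_X0_5_Weights

    meta = ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1.meta
    # 0.04 GFLOPs: small enough that the old one-decimal rounding displayed
    # it as 0.0, which is precisely what the shufflenet hunk above fixes.
    print(meta["_ops"])
    print(meta["_weight_size"])  # 5.282 (MB)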
diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index d2a467c1f18..94f5d50e6e9 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -135,8 +135,8 @@ class SqueezeNet1_0_Weights(WeightsEnum): "acc@5": 80.420, } }, - "_ops": 0.8, - "_weight_size": 4.778434, + "_ops": 0.819, + "_weight_size": 4.778, }, ) DEFAULT = IMAGENET1K_V1 @@ -156,8 +156,8 @@ class SqueezeNet1_1_Weights(WeightsEnum): "acc@5": 80.624, } }, - "_ops": 0.3, - "_weight_size": 4.729117, + "_ops": 0.349, + "_weight_size": 4.729, }, ) DEFAULT = IMAGENET1K_V1 diff --git a/torchvision/models/swin_transformer.py b/torchvision/models/swin_transformer.py index 50762c0145d..47498e66d7e 100644 --- a/torchvision/models/swin_transformer.py +++ b/torchvision/models/swin_transformer.py @@ -660,8 +660,8 @@ class Swin_T_Weights(WeightsEnum): "acc@5": 95.776, } }, - "_ops": 4.5, - "_weight_size": 108.190383, + "_ops": 4.491, + "_weight_size": 108.19, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) @@ -685,8 +685,8 @@ class Swin_S_Weights(WeightsEnum): "acc@5": 96.360, } }, - "_ops": 8.7, - "_weight_size": 189.786254, + "_ops": 8.741, + "_weight_size": 189.786, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) @@ -710,8 +710,8 @@ class Swin_B_Weights(WeightsEnum): "acc@5": 96.640, } }, - "_ops": 15.4, - "_weight_size": 335.363585, + "_ops": 15.431, + "_weight_size": 335.364, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) @@ -735,8 +735,8 @@ class Swin_V2_T_Weights(WeightsEnum): "acc@5": 96.132, } }, - "_ops": 5.9, - "_weight_size": 108.625840, + "_ops": 5.94, + "_weight_size": 108.626, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) @@ -760,8 +760,8 @@ class Swin_V2_S_Weights(WeightsEnum): "acc@5": 96.816, } }, - "_ops": 11.5, - "_weight_size": 190.674577, + "_ops": 11.546, + "_weight_size": 190.675, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) @@ -785,8 +785,8 @@ class Swin_V2_B_Weights(WeightsEnum): "acc@5": 96.864, } }, - "_ops": 20.3, - "_weight_size": 336.371781, + "_ops": 20.325, + "_weight_size": 336.372, "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.""", }, ) diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py index 777a376c87e..6725dedd404 100644 --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -127,8 +127,8 @@ class VGG11_Weights(WeightsEnum): "acc@5": 88.628, } }, - "_ops": 7.6, - "_weight_size": 506.840077, + "_ops": 7.609, + "_weight_size": 506.84, }, ) DEFAULT = IMAGENET1K_V1 @@ -147,8 +147,8 @@ class VGG11_BN_Weights(WeightsEnum): "acc@5": 89.810, } }, - "_ops": 7.6, - "_weight_size": 506.881400, + "_ops": 7.609, + "_weight_size": 506.881, }, ) DEFAULT = IMAGENET1K_V1 @@ -167,8 +167,8 @@ class VGG13_Weights(WeightsEnum): "acc@5": 89.246, } }, - "_ops": 11.3, - "_weight_size": 507.545068, + "_ops": 11.308, + "_weight_size": 507.545, }, ) DEFAULT = IMAGENET1K_V1 @@ -187,8 +187,8 @@ class VGG13_BN_Weights(WeightsEnum): "acc@5": 90.374, } }, - "_ops": 11.3, - "_weight_size": 507.589627, + "_ops": 11.308, + "_weight_size": 507.59, }, ) DEFAULT = IMAGENET1K_V1 @@ -207,8 +207,8 @@ class VGG16_Weights(WeightsEnum): "acc@5": 90.382, } }, - "_ops": 15.5, - "_weight_size": 
527.795678, + "_ops": 15.47, + "_weight_size": 527.796, }, ) IMAGENET1K_FEATURES = Weights( @@ -231,8 +231,8 @@ class VGG16_Weights(WeightsEnum): "acc@5": float("nan"), } }, - "_ops": 15.5, - "_weight_size": 527.801824, + "_ops": 15.47, + "_weight_size": 527.802, "_docs": """ These weights can't be used for classification because they are missing values in the `classifier` module. Only the `features` module has valid values and can be used for feature extraction. The weights @@ -256,8 +256,8 @@ class VGG16_BN_Weights(WeightsEnum): "acc@5": 91.516, } }, - "_ops": 15.5, - "_weight_size": 527.866207, + "_ops": 15.47, + "_weight_size": 527.866, }, ) DEFAULT = IMAGENET1K_V1 @@ -276,8 +276,8 @@ class VGG19_Weights(WeightsEnum): "acc@5": 90.876, } }, - "_ops": 19.6, - "_weight_size": 548.051225, + "_ops": 19.632, + "_weight_size": 548.051, }, ) DEFAULT = IMAGENET1K_V1 @@ -296,8 +296,8 @@ class VGG19_BN_Weights(WeightsEnum): "acc@5": 91.842, } }, - "_ops": 19.6, - "_weight_size": 548.142819, + "_ops": 19.632, + "_weight_size": 548.143, }, ) DEFAULT = IMAGENET1K_V1 diff --git a/torchvision/models/video/mvit.py b/torchvision/models/video/mvit.py index df0429269e3..d20d6e907f1 100644 --- a/torchvision/models/video/mvit.py +++ b/torchvision/models/video/mvit.py @@ -624,8 +624,8 @@ class MViT_V1_B_Weights(WeightsEnum): "acc@5": 93.582, } }, - "_ops": 70.6, - "_weight_size": 139.764235, + "_ops": 70.599, + "_weight_size": 139.764, }, ) DEFAULT = KINETICS400_V1 @@ -657,8 +657,8 @@ class MViT_V2_S_Weights(WeightsEnum): "acc@5": 94.665, } }, - "_ops": 64.2, - "_weight_size": 131.883704, + "_ops": 64.224, + "_weight_size": 131.884, }, ) DEFAULT = KINETICS400_V1 diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py index 0e765da65ec..6f5bd876457 100644 --- a/torchvision/models/video/resnet.py +++ b/torchvision/models/video/resnet.py @@ -332,8 +332,8 @@ class R3D_18_Weights(WeightsEnum): "acc@5": 83.479, } }, - "_ops": 40.7, - "_weight_size": 127.359406, + "_ops": 40.697, + "_weight_size": 127.359, }, ) DEFAULT = KINETICS400_V1 @@ -352,8 +352,8 @@ class MC3_18_Weights(WeightsEnum): "acc@5": 84.130, } }, - "_ops": 43.3, - "_weight_size": 44.671906, + "_ops": 43.343, + "_weight_size": 44.672, }, ) DEFAULT = KINETICS400_V1 @@ -372,8 +372,8 @@ class R2Plus1D_18_Weights(WeightsEnum): "acc@5": 86.175, } }, - "_ops": 40.5, - "_weight_size": 120.318409, + "_ops": 40.519, + "_weight_size": 120.318, }, ) DEFAULT = KINETICS400_V1 diff --git a/torchvision/models/video/s3d.py b/torchvision/models/video/s3d.py index da8337024d2..64874712f66 100644 --- a/torchvision/models/video/s3d.py +++ b/torchvision/models/video/s3d.py @@ -175,8 +175,8 @@ class S3D_Weights(WeightsEnum): "acc@5": 88.050, } }, - "_ops": 18.0, - "_weight_size": 31.971929, + "_ops": 17.979, + "_weight_size": 31.972, }, ) DEFAULT = KINETICS400_V1 diff --git a/torchvision/models/vision_transformer.py b/torchvision/models/vision_transformer.py index d77f90255b8..06a47c03ae6 100644 --- a/torchvision/models/vision_transformer.py +++ b/torchvision/models/vision_transformer.py @@ -363,8 +363,8 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 95.318, } }, - "_ops": 17.6, - "_weight_size": 330.284623, + "_ops": 17.564, + "_weight_size": 330.285, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT `_'s training recipe. 
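The SWAG entries in the ViT hunks just below make an instructive contrast with the RegNets: for ViT_B_16, scaling by input area alone under-predicts the 384x384 figure, because self-attention cost grows quadratically with token count while the patch-embedding and MLP terms grow only linearly. A pure-arithmetic illustration:

    # ViT_B_16: 17.564 GFLOPs at 224x224 vs 55.484 at 384x384 (SWAG_E2E_V1).
    base = 17.564
    area_only = base * (384 / 224) ** 2  # linear-in-tokens terms only
    # ~51.6, short of 55.484; the remainder is the quadratic attention term
    # (576 tokens at 384x384 vs 196 at 224x224, with 16x16 patches).
    print(f"{area_only:.1f}")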
@@ -389,8 +389,8 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 97.650, } }, - "_ops": 55.5, - "_weight_size": 331.397904, + "_ops": 55.484, + "_weight_size": 331.398, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original `SWAG `_ weights on ImageNet-1K data. @@ -416,8 +416,8 @@ class ViT_B_16_Weights(WeightsEnum): "acc@5": 96.180, } }, - "_ops": 17.6, - "_weight_size": 330.284623, + "_ops": 17.564, + "_weight_size": 330.285, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk weights and a linear classifier learnt on top of them trained on ImageNet-1K data. @@ -442,8 +442,8 @@ class ViT_B_32_Weights(WeightsEnum): "acc@5": 92.466, } }, - "_ops": 4.4, - "_weight_size": 336.603959, + "_ops": 4.409, + "_weight_size": 336.604, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT `_'s training recipe. @@ -468,8 +468,8 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 94.638, } }, - "_ops": 61.6, - "_weight_size": 1161.023240, + "_ops": 61.555, + "_weight_size": 1161.023, "_docs": """ These weights were trained from scratch by using a modified version of TorchVision's `new training recipe @@ -495,8 +495,8 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 98.512, } }, - "_ops": 362.0, - "_weight_size": 1164.257615, + "_ops": 361.986, + "_weight_size": 1164.258, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original `SWAG `_ weights on ImageNet-1K data. @@ -522,8 +522,8 @@ class ViT_L_16_Weights(WeightsEnum): "acc@5": 97.422, } }, - "_ops": 61.6, - "_weight_size": 1161.023240, + "_ops": 61.555, + "_weight_size": 1161.023, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk weights and a linear classifier learnt on top of them trained on ImageNet-1K data. @@ -548,8 +548,8 @@ class ViT_L_32_Weights(WeightsEnum): "acc@5": 93.07, } }, - "_ops": 15.4, - "_weight_size": 1169.448960, + "_ops": 15.378, + "_weight_size": 1169.449, "_docs": """ These weights were trained from scratch by using a modified version of `DeIT `_'s training recipe. @@ -578,8 +578,8 @@ class ViT_H_14_Weights(WeightsEnum): "acc@5": 98.694, } }, - "_ops": 1016.7, - "_weight_size": 2416.643174, + "_ops": 1016.717, + "_weight_size": 2416.643, "_docs": """ These weights are learnt via transfer learning by end-to-end fine-tuning the original `SWAG `_ weights on ImageNet-1K data. @@ -605,8 +605,8 @@ class ViT_H_14_Weights(WeightsEnum): "acc@5": 97.730, } }, - "_ops": 167.3, - "_weight_size": 2411.208604, + "_ops": 167.295, + "_weight_size": 2411.209, "_docs": """ These weights are composed of the original frozen `SWAG `_ trunk weights and a linear classifier learnt on top of them trained on ImageNet-1K data. From 9730f3bf46fc44efb94c092c1366edd8fa64f145 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 11 Nov 2022 16:22:52 +0000 Subject: [PATCH 14/15] Change naming of columns. --- .gitignore | 1 - docs/source/conf.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index d39404b1fdf..f16b54061e0 100644 --- a/.gitignore +++ b/.gitignore @@ -35,7 +35,6 @@ gen.yml *.orig *-checkpoint.ipynb *.venv -*.DS_Store ## Xcode User settings xcuserdata/ diff --git a/docs/source/conf.py b/docs/source/conf.py index 30c42b75652..195243c35d5 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -364,9 +364,9 @@ def inject_weight_metadata(app, what, name, obj, options, lines): v = f"{v_sample}, ... 
({len(v)-max_visible} omitted)" if len(v) > max_visible else v_sample elif k == "_ops": if obj.__name__.endswith("_QuantizedWeights"): - k = "integer operations (GOPs)" + k = "giga instructions per sec (GIPS)" else: - k = "floating point operations (GFLOPs)" + k = "giga floating-point operations per sec (GFLOPS)" elif k == "_weight_size": k = "weights file size (MB)" @@ -393,7 +393,7 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern if exclude_patterns is not None: weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)] - ops_name = "GOPs" if "QuantizedWeights" in weights_endswith else "GFLOPs" + ops_name = "GIPS" if "QuantizedWeights" in weights_endswith else "GFLOPS" metrics_keys, metrics_names = zip(*metrics) column_names = ( From cf0e3c07d4e7760cc1450de0e6b0828ce6c1eed5 Mon Sep 17 00:00:00 2001 From: Vasilis Vryniotis Date: Fri, 11 Nov 2022 16:49:56 +0000 Subject: [PATCH 15/15] Update tables --- docs/source/conf.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 195243c35d5..2d1eb2d7a19 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -364,11 +364,11 @@ def inject_weight_metadata(app, what, name, obj, options, lines): v = f"{v_sample}, ... ({len(v)-max_visible} omitted)" if len(v) > max_visible else v_sample elif k == "_ops": if obj.__name__.endswith("_QuantizedWeights"): - k = "giga instructions per sec (GIPS)" + v = f"{v} giga instructions per sec" else: - k = "giga floating-point operations per sec (GFLOPS)" + v = f"{v} giga floating-point operations per sec" elif k == "_weight_size": - k = "weights file size (MB)" + v = f"{v} MB (file size)" table.append((str(k), str(v))) table = tabulate(table, tablefmt="rst")
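For reference, the final hunk moves the units into the value string while the "_ops" and "_weight_size" keys pass through unchanged. A standalone sketch of the rendering, using the same tabulate call as inject_weight_metadata, with values taken from the ResNet50 hunks above:

    from tabulate import tabulate

    # Mirror the table rows inject_weight_metadata now emits for one weight.
    rows = [
        ("_ops", "4.089 giga floating-point operations per sec"),
        ("_weight_size", "97.79 MB (file size)"),
    ]
    print(tabulate(rows, tablefmt="rst"))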