Skip to content

Commit deba056

Browse files
toni057 (Toni Blaslov) and datumbox authored
Adding FLOPs and size to model metadata (#6936)
* Adding FLOPs and size to model metadata
* Adding weight size to quantization models
* Small refactor of rich metadata
* Removing unused code
* Fixing wrong entries
* Adding .DS_Store to gitignore
* Renaming _flops to _ops
* Adding number of operations to quantization models
* Reflecting _flops change to _ops
* Renamed ops and weight size in individual model doc pages
* Linter fixes
* Rounding ops to first decimal
* Rounding num ops and sizes to 3 decimals
* Change naming of columns.
* Update tables

Co-authored-by: Toni Blaslov <[email protected]>
Co-authored-by: Vasilis Vryniotis <[email protected]>
1 parent ad2ecea commit deba056

39 files changed

+353
-10
lines changed

docs/source/conf.py

Lines changed: 26 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -362,6 +362,14 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
362362
max_visible = 3
363363
v_sample = ", ".join(v[:max_visible])
364364
v = f"{v_sample}, ... ({len(v)-max_visible} omitted)" if len(v) > max_visible else v_sample
365+
elif k == "_ops":
366+
if obj.__name__.endswith("_QuantizedWeights"):
367+
v = f"{v} giga instructions per sec"
368+
else:
369+
v = f"{v} giga floating-point operations per sec"
370+
elif k == "_weight_size":
371+
v = f"{v} MB (file size)"
372+
365373
table.append((str(k), str(v)))
366374
table = tabulate(table, tablefmt="rst")
367375
lines += [".. rst-class:: table-weights"] # Custom CSS class, see custom_torchvision.css
@@ -385,27 +393,38 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern
385393
if exclude_patterns is not None:
386394
weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)]
387395

396+
ops_name = "GIPS" if "QuantizedWeights" in weights_endswith else "GFLOPS"
397+
388398
metrics_keys, metrics_names = zip(*metrics)
389-
column_names = ["Weight"] + list(metrics_names) + ["Params", "Recipe"]
399+
column_names = (
400+
["Weight"] + list(metrics_names) + ["Params"] + [ops_name, "Size (MB)", "Recipe"]
401+
) # Final column order
390402
column_names = [f"**{name}**" for name in column_names] # Add bold
391403

392-
content = [
393-
(
404+
content = []
405+
for w in weights:
406+
row = [
394407
f":class:`{w} <{type(w).__name__}>`",
395408
*(w.meta["_metrics"][dataset][metric] for metric in metrics_keys),
396409
f"{w.meta['num_params']/1e6:.1f}M",
410+
f"{w.meta['_ops']:.3f}",
411+
f"{round(w.meta['_weight_size'], 1):.1f}",
397412
f"`link <{w.meta['recipe']}>`__",
398-
)
399-
for w in weights
400-
]
413+
]
414+
415+
content.append(row)
416+
417+
column_widths = ["110"] + ["18"] * len(metrics_names) + ["18"] * 3 + ["10"]
418+
widths_table = " ".join(column_widths)
419+
401420
table = tabulate(content, headers=column_names, tablefmt="rst")
402421

403422
generated_dir = Path("generated")
404423
generated_dir.mkdir(exist_ok=True)
405424
with open(generated_dir / f"{table_name}_table.rst", "w+") as table_file:
406425
table_file.write(".. rst-class:: table-weights\n") # Custom CSS class, see custom_torchvision.css
407426
table_file.write(".. table::\n")
408-
table_file.write(f" :widths: 100 {'20 ' * len(metrics_names)} 20 10\n\n")
427+
table_file.write(f" :widths: {widths_table} \n\n")
409428
table_file.write(f"{textwrap.indent(table, ' ' * 4)}\n\n")
410429

411430

test/test_extended_models.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -155,11 +155,13 @@ def test_schema_meta_validation(model_fn):
155155
"recipe",
156156
"unquantized",
157157
"_docs",
158+
"_ops",
159+
"_weight_size",
158160
}
159161
# mandatory fields for each computer vision task
160162
classification_fields = {"categories", ("_metrics", "ImageNet-1K", "acc@1"), ("_metrics", "ImageNet-1K", "acc@5")}
161163
defaults = {
162-
"all": {"_metrics", "min_size", "num_params", "recipe", "_docs"},
164+
"all": {"_metrics", "min_size", "num_params", "recipe", "_docs", "_weight_size", "_ops"},
163165
"models": classification_fields,
164166
"detection": {"categories", ("_metrics", "COCO-val2017", "box_map")},
165167
"quantization": classification_fields | {"backend", "unquantized"},

torchvision/models/alexnet.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,8 @@ class AlexNet_Weights(WeightsEnum):
6767
"acc@5": 79.066,
6868
}
6969
},
70+
"_ops": 0.714,
71+
"_weight_size": 233.087,
7072
"_docs": """
7173
These weights reproduce closely the results of the paper using a simplified training recipe.
7274
""",

torchvision/models/convnext.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -219,6 +219,8 @@ class ConvNeXt_Tiny_Weights(WeightsEnum):
219219
"acc@5": 96.146,
220220
}
221221
},
222+
"_ops": 4.456,
223+
"_weight_size": 109.119,
222224
},
223225
)
224226
DEFAULT = IMAGENET1K_V1
@@ -237,6 +239,8 @@ class ConvNeXt_Small_Weights(WeightsEnum):
237239
"acc@5": 96.650,
238240
}
239241
},
242+
"_ops": 8.684,
243+
"_weight_size": 191.703,
240244
},
241245
)
242246
DEFAULT = IMAGENET1K_V1
@@ -255,6 +259,8 @@ class ConvNeXt_Base_Weights(WeightsEnum):
255259
"acc@5": 96.870,
256260
}
257261
},
262+
"_ops": 15.355,
263+
"_weight_size": 338.064,
258264
},
259265
)
260266
DEFAULT = IMAGENET1K_V1
@@ -273,6 +279,8 @@ class ConvNeXt_Large_Weights(WeightsEnum):
273279
"acc@5": 96.976,
274280
}
275281
},
282+
"_ops": 34.361,
283+
"_weight_size": 754.537,
276284
},
277285
)
278286
DEFAULT = IMAGENET1K_V1

torchvision/models/densenet.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
from ._meta import _IMAGENET_CATEGORIES
1616
from ._utils import _ovewrite_named_param, handle_legacy_interface
1717

18-
1918
__all__ = [
2019
"DenseNet",
2120
"DenseNet121_Weights",
@@ -278,6 +277,8 @@ class DenseNet121_Weights(WeightsEnum):
278277
"acc@5": 91.972,
279278
}
280279
},
280+
"_ops": 2.834,
281+
"_weight_size": 30.845,
281282
},
282283
)
283284
DEFAULT = IMAGENET1K_V1
@@ -296,6 +297,8 @@ class DenseNet161_Weights(WeightsEnum):
296297
"acc@5": 93.560,
297298
}
298299
},
300+
"_ops": 7.728,
301+
"_weight_size": 110.369,
299302
},
300303
)
301304
DEFAULT = IMAGENET1K_V1
@@ -314,6 +317,8 @@ class DenseNet169_Weights(WeightsEnum):
314317
"acc@5": 92.806,
315318
}
316319
},
320+
"_ops": 3.36,
321+
"_weight_size": 54.708,
317322
},
318323
)
319324
DEFAULT = IMAGENET1K_V1
@@ -332,6 +337,8 @@ class DenseNet201_Weights(WeightsEnum):
332337
"acc@5": 93.370,
333338
}
334339
},
340+
"_ops": 4.291,
341+
"_weight_size": 77.373,
335342
},
336343
)
337344
DEFAULT = IMAGENET1K_V1
@@ -444,7 +451,6 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
444451
# The dictionary below is internal implementation detail and will be removed in v0.15
445452
from ._utils import _ModelURLs
446453

447-
448454
model_urls = _ModelURLs(
449455
{
450456
"densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,

torchvision/models/detection/faster_rcnn.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -388,6 +388,8 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum):
388388
"box_map": 37.0,
389389
}
390390
},
391+
"_ops": 134.38,
392+
"_weight_size": 159.743,
391393
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
392394
},
393395
)
@@ -407,6 +409,8 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
407409
"box_map": 46.7,
408410
}
409411
},
412+
"_ops": 280.371,
413+
"_weight_size": 167.104,
410414
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
411415
},
412416
)
@@ -426,6 +430,8 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum):
426430
"box_map": 32.8,
427431
}
428432
},
433+
"_ops": 4.494,
434+
"_weight_size": 74.239,
429435
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
430436
},
431437
)
@@ -445,6 +451,8 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum):
445451
"box_map": 22.8,
446452
}
447453
},
454+
"_ops": 0.719,
455+
"_weight_size": 74.239,
448456
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
449457
},
450458
)

torchvision/models/detection/fcos.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -662,6 +662,8 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum):
662662
"box_map": 39.2,
663663
}
664664
},
665+
"_ops": 128.207,
666+
"_weight_size": 123.608,
665667
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
666668
},
667669
)

torchvision/models/detection/keypoint_rcnn.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -328,6 +328,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
328328
"kp_map": 61.1,
329329
}
330330
},
331+
"_ops": 133.924,
332+
"_weight_size": 226.054,
331333
"_docs": """
332334
These weights were produced by following a similar training recipe as on the paper but use a checkpoint
333335
from an early epoch.
@@ -347,6 +349,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
347349
"kp_map": 65.0,
348350
}
349351
},
352+
"_ops": 137.42,
353+
"_weight_size": 226.054,
350354
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
351355
},
352356
)

torchvision/models/detection/mask_rcnn.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -370,6 +370,8 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
370370
"mask_map": 34.6,
371371
}
372372
},
373+
"_ops": 134.38,
374+
"_weight_size": 169.84,
373375
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
374376
},
375377
)
@@ -390,6 +392,8 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
390392
"mask_map": 41.8,
391393
}
392394
},
395+
"_ops": 333.577,
396+
"_weight_size": 177.219,
393397
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
394398
},
395399
)

torchvision/models/detection/retinanet.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -690,6 +690,8 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum):
690690
"box_map": 36.4,
691691
}
692692
},
693+
"_ops": 151.54,
694+
"_weight_size": 130.267,
693695
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
694696
},
695697
)
@@ -709,6 +711,8 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum):
709711
"box_map": 41.5,
710712
}
711713
},
714+
"_ops": 152.238,
715+
"_weight_size": 146.037,
712716
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
713717
},
714718
)

0 commit comments

Comments
 (0)