
Commit e967c72 (parent 9ca3437)

Update REAMDE.md. Sneak in g/G (giant / gigantic?) ViT defs from scaling paper

File tree: 3 files changed, +42 −8 lines

README.md

Lines changed: 7 additions & 4 deletions
@@ -23,11 +23,14 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor
 
 ## What's New
 
-### Jan 6, 2022
-* Version 0.5.2 w/ release to be pushed to pypi. It's been a while since last pypi update and riskier changes will be merged to main branch soon....
-* Tried training a few small / mobile optimized models, a few are good so far, more on the way...
+### Jan 14, 2022
+* Version 0.5.4 w/ release to be pushed to pypi. It's been a while since last pypi update and riskier changes will be merged to main branch soon....
+* Add ConvNeXT models /w weights from official impl (https://github.com/facebookresearch/ConvNeXt), a few perf tweaks, compatible with timm features
+* Tried training a few small (~1.8-3M param) / mobile optimized models, a few are good so far, more on the way...
   * `mnasnet_small` - 65.6 top-1
-  * `lcnet_100` - 72.1 top-1
+  * `mobilenetv2_050` - 65.9
+  * `lcnet_100/075/050` - 72.1 / 68.8 / 63.1
+  * `semnasnet_075` - 73
   * `fbnetv3_b/d/g` - 79.1 / 79.7 / 82.0
 * TinyNet models added by [rsomani95](https://github.com/rsomani95)
 * LCNet added via MobileNetV3 architecture
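For readers who want to try one of the models called out above, here is a minimal sketch (not part of the commit) of pulling it through timm's factory API; it assumes the 0.5.4 release with these weights published.

import timm
import torch

# quick check of one of the new small models listed in the README hunk above
model = timm.create_model('lcnet_100', pretrained=True)  # 72.1 top-1 per the notes
model.eval()

with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # default 224x224 RGB input
print(logits.shape)  # torch.Size([1, 1000]) -> ImageNet-1k classes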

tests/test_models.py

Lines changed: 4 additions & 4 deletions
@@ -28,12 +28,12 @@
 NUM_NON_STD = len(NON_STD_FILTERS)
 
 # exclude models that cause specific test failures
-if 'GITHUB_ACTIONS' in os.environ:  # and 'Linux' in platform.system():
+if 'GITHUB_ACTIONS' in os.environ:
     # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
     EXCLUDE_FILTERS = [
         '*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm',
         '*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*',
-        '*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*']
+        '*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*', 'vit_gi*']
 else:
     EXCLUDE_FILTERS = []
 

@@ -255,7 +255,7 @@ def test_model_features_pretrained(model_name, batch_size):
 EXCLUDE_JIT_FILTERS = [
     '*iabn*', 'tresnet*',  # models using inplace abn unlikely to ever be scriptable
     'dla*', 'hrnet*', 'ghostnet*',  # hopefully fix at some point
-    'vit_large_*', 'vit_huge_*',
+    'vit_large_*', 'vit_huge_*', 'vit_gi*',
 ]
 
 

@@ -334,7 +334,7 @@ def _create_fx_model(model, train=False):
     return fx_model
 
 
-EXCLUDE_FX_FILTERS = []
+EXCLUDE_FX_FILTERS = ['vit_gi*']
 # not enough memory to run fx on more models than other tests
 if 'GITHUB_ACTIONS' in os.environ:
     EXCLUDE_FX_FILTERS += [
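The exclusion entries above are shell-style wildcards, so the single 'vit_gi*' pattern covers both of the new giant and gigantic ViT names. A small sketch (my illustration, not repo code) of how such a filter behaves, using Python's fnmatch:

from fnmatch import fnmatch

EXCLUDE = ['vit_gi*']
names = ['vit_giant_patch14_224', 'vit_gigantic_patch14_224', 'vit_huge_patch14_224']

# keep only names that match no exclude pattern
kept = [n for n in names if not any(fnmatch(n, pat) for pat in EXCLUDE)]
print(kept)  # ['vit_huge_patch14_224'] - giant and gigantic are both filtered out

Note that 'vit_gi*' deliberately does not cover 'vit_huge_*', which already has its own entry in EXCLUDE_JIT_FILTERS.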

timm/models/vision_transformer.py

Lines changed: 31 additions & 0 deletions
@@ -105,6 +105,10 @@ def _cfg(url='', **kwargs):
         'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz',
         input_size=(3, 384, 384), crop_pct=1.0),
 
+    'vit_huge_patch14_224': _cfg(url=''),
+    'vit_giant_patch14_224': _cfg(url=''),
+    'vit_gigantic_patch14_224': _cfg(url=''),
+
     # patch models, imagenet21k (weights from official Google JAX impl)
     'vit_tiny_patch16_224_in21k': _cfg(
         url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz',

@@ -715,6 +719,33 @@ def vit_base_patch32_sam_224(pretrained=False, **kwargs):
     return model
 
 
+@register_model
+def vit_huge_patch14_224(pretrained=False, **kwargs):
+    """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
+    """
+    model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs)
+    model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_giant_patch14_224(pretrained=False, **kwargs):
+    """ ViT-Giant model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
+    """
+    model_kwargs = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, **kwargs)
+    model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_gigantic_patch14_224(pretrained=False, **kwargs):
+    """ ViT-Gigantic model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
+    """
+    model_kwargs = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, **kwargs)
+    model = _create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
 @register_model
 def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs):
     """ ViT-Tiny (Vit-Ti/16).