@@ -1149,15 +1149,26 @@ def _cfg(url='', **kwargs):
     'vit_giant_patch14_224.untrained': _cfg(url=''),
     'vit_gigantic_patch14_224.untrained': _cfg(url=''),

-    # patch models, imagenet21k (weights from official Google JAX impl)
+    # patch models, imagenet21k (weights from official Google JAX impl), classifier not valid
+    'vit_base_patch32_224.orig_in21k': _cfg(
+        #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth',
+        hf_hub_id='timm/',
+        num_classes=0),
+    'vit_base_patch16_224.orig_in21k': _cfg(
+        #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth',
+        hf_hub_id='timm/',
+        num_classes=0),
     'vit_large_patch32_224.orig_in21k': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
+        #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
+        hf_hub_id='timm/',
+        num_classes=0),
+    'vit_large_patch16_224.orig_in21k': _cfg(
+        #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth',
         hf_hub_id='timm/',
-        num_classes=21843),
+        num_classes=0),
     'vit_huge_patch14_224.orig_in21k': _cfg(
-        url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz',
         hf_hub_id='timm/',
-        custom_load=True, num_classes=21843),
+        num_classes=0),

     # How to train your ViT (augreg) weights, pretrained on in21k
     'vit_tiny_patch16_224.augreg_in21k': _cfg(
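
The hunk above marks the original in21k classifiers as invalid by setting num_classes=0, so these models are created head-less by default. A minimal sketch of what that means for users, assuming a timm release that contains these configs and access to the Hugging Face Hub:

import timm
import torch

# With num_classes=0 in the pretrained cfg, the classifier head is
# nn.Identity, so forward() returns pooled features rather than the
# now-invalid 21843-way logits.
model = timm.create_model('vit_base_patch16_224.orig_in21k', pretrained=True)
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
print(feats.shape)  # expected torch.Size([1, 768]) -- ViT-B embed dim

# To fine-tune, request a fresh randomly initialized head explicitly:
# timm.create_model('vit_base_patch16_224.orig_in21k', pretrained=True, num_classes=1000)
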
@@ -1498,19 +1509,19 @@ def _cfg(url='', **kwargs):
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024),

     'vit_base_patch32_clip_224.openai': _cfg(
-        hf_hub_id='timm/',
+        hf_hub_id='timm/vit_base_patch32_clip_224.openai',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512),
     'vit_base_patch16_clip_224.openai': _cfg(
-        hf_hub_id='timm/',
+        hf_hub_id='timm/vit_base_patch16_clip_224.openai',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512),
     'vit_large_patch14_clip_224.openai': _cfg(
-        hf_hub_id='timm/',
+        hf_hub_id='timm/vit_large_patch14_clip_224.openai',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
     'vit_large_patch14_clip_336.openai': _cfg(
-        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/vit_large_patch14_clip_336.openai', hf_hub_filename='open_clip_pytorch_model.bin',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
         crop_pct=1.0, input_size=(3, 336, 336), num_classes=768),