rwightman HF staff committed on
Commit
6a3496b
1 Parent(s): 53ae89d

Update model config and README

Browse files
Files changed (2) hide show
  1. README.md +21 -17
  2. model.safetensors +3 -0
README.md CHANGED
@@ -2,7 +2,7 @@
2
  tags:
3
  - image-classification
4
  - timm
5
- library_tag: timm
6
  license: apache-2.0
7
  datasets:
8
  - imagenet-1k
@@ -16,7 +16,7 @@ ImageNet-12k training performed on TPUs thanks to support of the [TRC](https://s
16
 
17
  Fine-tuning performed on 8x GPU [Lambda Labs](https://lambdalabs.com/) cloud instances.
18
 
19
- ### Model Variants in [maxxvit.py](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/maxxvit.py)
20
 
21
  MaxxViT covers a number of related model architectures that share a common structure including:
22
  - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
@@ -48,8 +48,9 @@ from urllib.request import urlopen
48
  from PIL import Image
49
  import timm
50
 
51
- img = Image.open(
52
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
53
 
54
  model = timm.create_model('coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k', pretrained=True)
55
  model = model.eval()
@@ -69,8 +70,9 @@ from urllib.request import urlopen
69
  from PIL import Image
70
  import timm
71
 
72
- img = Image.open(
73
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
74
 
75
  model = timm.create_model(
76
  'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k',
@@ -87,12 +89,13 @@ output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batc
87
 
88
  for o in output:
89
  # print shape of each feature map in output
90
- # e.g.:
91
- # torch.Size([1, 128, 192, 192])
92
- # torch.Size([1, 128, 96, 96])
93
- # torch.Size([1, 256, 48, 48])
94
- # torch.Size([1, 512, 24, 24])
95
- # torch.Size([1, 1024, 12, 12])
 
96
  print(o.shape)
97
  ```
98
 
@@ -102,8 +105,9 @@ from urllib.request import urlopen
102
  from PIL import Image
103
  import timm
104
 
105
- img = Image.open(
106
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
107
 
108
  model = timm.create_model(
109
  'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k',
@@ -121,10 +125,10 @@ output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_featu
121
  # or equivalently (without needing to set num_classes=0)
122
 
123
  output = model.forward_features(transforms(img).unsqueeze(0))
124
- # output is unpooled (ie.e a (batch_size, num_features, H, W) tensor
125
 
126
  output = model.forward_head(output, pre_logits=True)
127
- # output is (batch_size, num_features) tensor
128
  ```
129
 
130
  ## Model Comparison
@@ -232,7 +236,7 @@ output = model.forward_head(output, pre_logits=True)
232
  publisher = {GitHub},
233
  journal = {GitHub repository},
234
  doi = {10.5281/zenodo.4414861},
235
- howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
236
  }
237
  ```
238
  ```bibtex
2
  tags:
3
  - image-classification
4
  - timm
5
+ library_name: timm
6
  license: apache-2.0
7
  datasets:
8
  - imagenet-1k
16
 
17
  Fine-tuning performed on 8x GPU [Lambda Labs](https://lambdalabs.com/) cloud instances.
18
 
19
+ ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py)
20
 
21
  MaxxViT covers a number of related model architectures that share a common structure including:
22
  - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
48
  from PIL import Image
49
  import timm
50
 
51
+ img = Image.open(urlopen(
52
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
53
+ ))
54
 
55
  model = timm.create_model('coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k', pretrained=True)
56
  model = model.eval()
70
  from PIL import Image
71
  import timm
72
 
73
+ img = Image.open(urlopen(
74
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
75
+ ))
76
 
77
  model = timm.create_model(
78
  'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k',
89
 
90
  for o in output:
91
  # print shape of each feature map in output
92
+ # e.g.:
93
+ # torch.Size([1, 64, 112, 112])
94
+ # torch.Size([1, 96, 56, 56])
95
+ # torch.Size([1, 192, 28, 28])
96
+ # torch.Size([1, 384, 14, 14])
97
+ # torch.Size([1, 768, 7, 7])
98
+
99
  print(o.shape)
100
  ```
101
 
105
  from PIL import Image
106
  import timm
107
 
108
+ img = Image.open(urlopen(
109
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
110
+ ))
111
 
112
  model = timm.create_model(
113
  'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k',
125
  # or equivalently (without needing to set num_classes=0)
126
 
127
  output = model.forward_features(transforms(img).unsqueeze(0))
128
+ # output is unpooled, a (1, 768, 7, 7) shaped tensor
129
 
130
  output = model.forward_head(output, pre_logits=True)
131
+ # output is a (1, num_features) shaped tensor
132
  ```
133
 
134
  ## Model Comparison
236
  publisher = {GitHub},
237
  journal = {GitHub repository},
238
  doi = {10.5281/zenodo.4414861},
239
+ howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
240
  }
241
  ```
242
  ```bibtex
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12e9f7a9707028b2125f03c389f80d8d3202dd830b381f309a9f28c7a3eeb59f
3
+ size 167032870