timm
/

Image Classification
timm
PyTorch
Safetensors
rwightman HF staff committed on
Commit
4a8e515
1 Parent(s): d6dfbbb

Update model config and README

Browse files
Files changed (2) hide show
  1. README.md +21 -17
  2. model.safetensors +3 -0
README.md CHANGED
@@ -2,7 +2,7 @@
2
  tags:
3
  - image-classification
4
  - timm
5
- library_tag: timm
6
  license: apache-2.0
7
  datasets:
8
  - imagenet-1k
@@ -13,7 +13,7 @@ A timm specific MaxxViT (w/ a MLP Log-CPB (continuous log-coordinate relative po
13
 
14
  ImageNet-1k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program.
15
 
16
- ### Model Variants in [maxxvit.py](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/maxxvit.py)
17
 
18
  MaxxViT covers a number of related model architectures that share a common structure including:
19
  - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
@@ -45,8 +45,9 @@ from urllib.request import urlopen
45
  from PIL import Image
46
  import timm
47
 
48
- img = Image.open(
49
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
50
 
51
  model = timm.create_model('maxxvit_rmlp_small_rw_256.sw_in1k', pretrained=True)
52
  model = model.eval()
@@ -66,8 +67,9 @@ from urllib.request import urlopen
66
  from PIL import Image
67
  import timm
68
 
69
- img = Image.open(
70
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
71
 
72
  model = timm.create_model(
73
  'maxxvit_rmlp_small_rw_256.sw_in1k',
@@ -84,12 +86,13 @@ output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batc
84
 
85
  for o in output:
86
  # print shape of each feature map in output
87
- # e.g.:
88
- # torch.Size([1, 128, 192, 192])
89
- # torch.Size([1, 128, 96, 96])
90
- # torch.Size([1, 256, 48, 48])
91
- # torch.Size([1, 512, 24, 24])
92
- # torch.Size([1, 1024, 12, 12])
 
93
  print(o.shape)
94
  ```
95
 
@@ -99,8 +102,9 @@ from urllib.request import urlopen
99
  from PIL import Image
100
  import timm
101
 
102
- img = Image.open(
103
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
104
 
105
  model = timm.create_model(
106
  'maxxvit_rmlp_small_rw_256.sw_in1k',
@@ -118,10 +122,10 @@ output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_featu
118
  # or equivalently (without needing to set num_classes=0)
119
 
120
  output = model.forward_features(transforms(img).unsqueeze(0))
121
- # output is unpooled (ie.e a (batch_size, num_features, H, W) tensor
122
 
123
  output = model.forward_head(output, pre_logits=True)
124
- # output is (batch_size, num_features) tensor
125
  ```
126
 
127
  ## Model Comparison
@@ -229,7 +233,7 @@ output = model.forward_head(output, pre_logits=True)
229
  publisher = {GitHub},
230
  journal = {GitHub repository},
231
  doi = {10.5281/zenodo.4414861},
232
- howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
233
  }
234
  ```
235
  ```bibtex
 
2
  tags:
3
  - image-classification
4
  - timm
5
+ library_name: timm
6
  license: apache-2.0
7
  datasets:
8
  - imagenet-1k
 
13
 
14
  ImageNet-1k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program.
15
 
16
+ ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py)
17
 
18
  MaxxViT covers a number of related model architectures that share a common structure including:
19
  - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
 
45
  from PIL import Image
46
  import timm
47
 
48
+ img = Image.open(urlopen(
49
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
50
+ ))
51
 
52
  model = timm.create_model('maxxvit_rmlp_small_rw_256.sw_in1k', pretrained=True)
53
  model = model.eval()
 
67
  from PIL import Image
68
  import timm
69
 
70
+ img = Image.open(urlopen(
71
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
72
+ ))
73
 
74
  model = timm.create_model(
75
  'maxxvit_rmlp_small_rw_256.sw_in1k',
 
86
 
87
  for o in output:
88
  # print shape of each feature map in output
89
+ # e.g.:
90
+ # torch.Size([1, 96, 128, 128])
91
+ # torch.Size([1, 96, 64, 64])
92
+ # torch.Size([1, 192, 32, 32])
93
+ # torch.Size([1, 384, 16, 16])
94
+ # torch.Size([1, 768, 8, 8])
95
+
96
  print(o.shape)
97
  ```
98
 
 
102
  from PIL import Image
103
  import timm
104
 
105
+ img = Image.open(urlopen(
106
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
107
+ ))
108
 
109
  model = timm.create_model(
110
  'maxxvit_rmlp_small_rw_256.sw_in1k',
 
122
  # or equivalently (without needing to set num_classes=0)
123
 
124
  output = model.forward_features(transforms(img).unsqueeze(0))
125
+ # output is unpooled, a (1, 768, 8, 8) shaped tensor
126
 
127
  output = model.forward_head(output, pre_logits=True)
128
+ # output is a (1, num_features) shaped tensor
129
  ```
130
 
131
  ## Model Comparison
 
233
  publisher = {GitHub},
234
  journal = {GitHub repository},
235
  doi = {10.5281/zenodo.4414861},
236
+ howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
237
  }
238
  ```
239
  ```bibtex
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99294255a6edcd0601921e35d662c0f988052e4846a614bc9fba5dc2185aa289
3
+ size 264098550