rwightman HF staff committed on
Commit
2e7e99a
1 Parent(s): 29b6cd9

Update model config and README

Browse files
Files changed (2) hide show
  1. README.md +21 -17
  2. model.safetensors +3 -0
README.md CHANGED
@@ -2,7 +2,7 @@
2
  tags:
3
  - image-classification
4
  - timm
5
- library_tag: timm
6
  license: apache-2.0
7
  datasets:
8
  - imagenet-1k
@@ -13,7 +13,7 @@ A timm specific MaxViT (w/ a MLP Log-CPB (continuous log-coordinate relative pos
13
 
14
  ImageNet-1k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program.
15
 
16
- ### Model Variants in [maxxvit.py](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/maxxvit.py)
17
 
18
  MaxxViT covers a number of related model architectures that share a common structure including:
19
  - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
@@ -44,8 +44,9 @@ from urllib.request import urlopen
44
  from PIL import Image
45
  import timm
46
 
47
- img = Image.open(
48
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
49
 
50
  model = timm.create_model('maxvit_rmlp_pico_rw_256.sw_in1k', pretrained=True)
51
  model = model.eval()
@@ -65,8 +66,9 @@ from urllib.request import urlopen
65
  from PIL import Image
66
  import timm
67
 
68
- img = Image.open(
69
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
70
 
71
  model = timm.create_model(
72
  'maxvit_rmlp_pico_rw_256.sw_in1k',
@@ -83,12 +85,13 @@ output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batc
83
 
84
  for o in output:
85
  # print shape of each feature map in output
86
- # e.g.:
87
- # torch.Size([1, 128, 192, 192])
88
- # torch.Size([1, 128, 96, 96])
89
- # torch.Size([1, 256, 48, 48])
90
- # torch.Size([1, 512, 24, 24])
91
- # torch.Size([1, 1024, 12, 12])
 
92
  print(o.shape)
93
  ```
94
 
@@ -98,8 +101,9 @@ from urllib.request import urlopen
98
  from PIL import Image
99
  import timm
100
 
101
- img = Image.open(
102
- urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))
 
103
 
104
  model = timm.create_model(
105
  'maxvit_rmlp_pico_rw_256.sw_in1k',
@@ -117,10 +121,10 @@ output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_featu
117
  # or equivalently (without needing to set num_classes=0)
118
 
119
  output = model.forward_features(transforms(img).unsqueeze(0))
120
- # output is unpooled (ie.e a (batch_size, num_features, H, W) tensor
121
 
122
  output = model.forward_head(output, pre_logits=True)
123
- # output is (batch_size, num_features) tensor
124
  ```
125
 
126
  ## Model Comparison
@@ -228,7 +232,7 @@ output = model.forward_head(output, pre_logits=True)
228
  publisher = {GitHub},
229
  journal = {GitHub repository},
230
  doi = {10.5281/zenodo.4414861},
231
- howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
232
  }
233
  ```
234
  ```bibtex
2
  tags:
3
  - image-classification
4
  - timm
5
+ library_name: timm
6
  license: apache-2.0
7
  datasets:
8
  - imagenet-1k
13
 
14
  ImageNet-1k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program.
15
 
16
+ ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py)
17
 
18
  MaxxViT covers a number of related model architectures that share a common structure including:
19
  - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
44
  from PIL import Image
45
  import timm
46
 
47
+ img = Image.open(urlopen(
48
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
49
+ ))
50
 
51
  model = timm.create_model('maxvit_rmlp_pico_rw_256.sw_in1k', pretrained=True)
52
  model = model.eval()
66
  from PIL import Image
67
  import timm
68
 
69
+ img = Image.open(urlopen(
70
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
71
+ ))
72
 
73
  model = timm.create_model(
74
  'maxvit_rmlp_pico_rw_256.sw_in1k',
85
 
86
  for o in output:
87
  # print shape of each feature map in output
88
+ # e.g.:
89
+ # torch.Size([1, 32, 128, 128])
90
+ # torch.Size([1, 32, 64, 64])
91
+ # torch.Size([1, 64, 32, 32])
92
+ # torch.Size([1, 128, 16, 16])
93
+ # torch.Size([1, 256, 8, 8])
94
+
95
  print(o.shape)
96
  ```
97
 
101
  from PIL import Image
102
  import timm
103
 
104
+ img = Image.open(urlopen(
105
+ 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
106
+ ))
107
 
108
  model = timm.create_model(
109
  'maxvit_rmlp_pico_rw_256.sw_in1k',
121
  # or equivalently (without needing to set num_classes=0)
122
 
123
  output = model.forward_features(transforms(img).unsqueeze(0))
124
+ # output is unpooled, a (1, 256, 8, 8) shaped tensor
125
 
126
  output = model.forward_head(output, pre_logits=True)
127
+ # output is a (1, num_features) shaped tensor
128
  ```
129
 
130
  ## Model Comparison
232
  publisher = {GitHub},
233
  journal = {GitHub repository},
234
  doi = {10.5281/zenodo.4414861},
235
+ howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
236
  }
237
  ```
238
  ```bibtex
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:941f10b997e941a323ca872f9206ca9bcccd4192f3964382051894e4a607f547
3
+ size 30211364