Datasets:
,modelId,paper_link,github_link,huggingface_info
0,google/switch-large-128,https://arxiv.org/pdf/2101.03961.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 4, 'downloads': 971, 'tags': ['transformers', 'pytorch', 'switch_transformers', 'text2text-generation', 'en', 'dataset:c4', 'arxiv:2101.03961', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
1,facebook/convnextv2-large-22k-224,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 2, 'downloads': 456, 'tags': ['transformers', 'pytorch', 'convnextv2', 'image-classification', 'dataset:imagenet-22k', 'arxiv:2301.00808', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
2,facebook/mask2former-swin-base-coco-panoptic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 10, 'downloads': 22037, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
3,facebook/levit-128S,https://arxiv.org/pdf/2104.01136.pdf,https://github.com/facebookresearch/LeViT/blob/main/README.md,"{'likes': 2, 'downloads': 686, 'tags': ['transformers', 'pytorch', 'levit', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2104.01136', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
4,facebook/mask2former-swin-tiny-cityscapes-instance,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 608, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
5,albert-xxlarge-v2,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 11, 'downloads': 25921, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'exbert', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
6,microsoft/dit-base-finetuned-rvlcdip,https://arxiv.org/pdf/2203.02378.pdf,https://github.com/microsoft/unilm/blob/master/dit/README.md,"{'likes': 16, 'downloads': 100910, 'tags': ['transformers', 'pytorch', 'beit', 'image-classification', 'dataset:rvl_cdip', 'arxiv:2203.02378', 'dit', 'vision', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
7,facebook/mask2former-swin-tiny-cityscapes-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 2, 'downloads': 1674, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
8,google/vit-large-patch16-224-in21k,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 9, 'downloads': 30571, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'feature-extraction', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
9,facebook/convnext-base-384-22k-1k,https://arxiv.org/pdf/2201.03545.pdf,https://github.com/facebookresearch/ConvNeXt/blob/main/README.md,"{'likes': 0, 'downloads': 435, 'tags': ['transformers', 'pytorch', 'tf', 'convnext', 'image-classification', 'dataset:imagenet-21k', 'dataset:imagenet-1k', 'arxiv:2201.03545', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
10,google/tapas-small-finetuned-wtq,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 5, 'downloads': 939, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:wikitablequestions', 'arxiv:2004.02349', 'arxiv:2010.00571', 'arxiv:1508.00305', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
11,facebook/convnextv2-atto-1k-224,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 0, 'downloads': 328, 'tags': ['transformers', 'pytorch', 'tf', 'convnextv2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2301.00808', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
12,facebook/opt-350m,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 72, 'downloads': 187125, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
13,google/vit-huge-patch14-224-in21k,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 6, 'downloads': 30685, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'feature-extraction', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
14,dandelin/vilt-b32-mlm,https://arxiv.org/pdf/2102.03334.pdf,https://github.com/dandelin/ViLT/blob/master/README.md,"{'likes': 6, 'downloads': 4954, 'tags': ['transformers', 'pytorch', 'vilt', 'fill-mask', 'arxiv:2102.03334', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
15,google/mobilenet_v2_1.0_224,https://arxiv.org/pdf/1801.04381.pdf,https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md,"{'likes': 4, 'downloads': 12296, 'tags': ['transformers', 'pytorch', 'mobilenet_v2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1801.04381', 'vision', 'license:other', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
16,Salesforce/codet5-small,https://arxiv.org/pdf/2109.00859.pdf,https://github.com/salesforce/CodeT5/blob/main/README.md,"{'likes': 40, 'downloads': 6344, 'tags': ['transformers', 'pytorch', 't5', 'text2text-generation', 'dataset:code_search_net', 'arxiv:2109.00859', 'arxiv:1909.09436', 'codet5', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
17,facebook/nougat-small,https://arxiv.org/pdf/2308.13418.pdf,https://github.com/facebookresearch/nougat/blob/main/README.md,"{'likes': 4, 'downloads': 804, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'vision', 'nougat', 'image-to-text', 'arxiv:2308.13418', 'license:apache-2.0', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
18,roberta-large,https://arxiv.org/pdf/1907.11692.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md,"{'likes': 126, 'downloads': 3917835, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'onnx', 'safetensors', 'roberta', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1907.11692', 'arxiv:1806.02847', 'exbert', 'license:mit', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
19,facebook/convnext-large-224,https://arxiv.org/pdf/2201.03545.pdf,https://github.com/facebookresearch/ConvNeXt/blob/main/README.md,"{'likes': 20, 'downloads': 378, 'tags': ['transformers', 'pytorch', 'tf', 'convnext', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2201.03545', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
20,openai/whisper-medium.en,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 27, 'downloads': 35274, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
21,google/canine-c,https://arxiv.org/pdf/2103.06874.pdf,https://github.com/google-research/language/blob/master/language/canine/README.md,"{'likes': 6, 'downloads': 105953, 'tags': ['transformers', 'pytorch', 'canine', 'feature-extraction', 'multilingual', 'af', 'sq', 'ar', 'an', 'hy', 'ast', 'az', 'ba', 'eu', 'bar', 'be', 'bn', 'inc', 'bs', 'br', 'bg', 'my', 'ca', 'ceb', 'ce', 'zh', 'cv', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'gl', 'ka', 'de', 'el', 'gu', 'ht', 'he', 'hi', 'hu', 'is', 'io', 'id', 'ga', 'it', 'ja', 'jv', 'kn', 'kk', 'ky', 'ko', 'la', 'lv', 'lt', 'roa', 'nds', 'lm', 'mk', 'mg', 'ms', 'ml', 'mr', 'mn', 'min', 'ne', 'new', 'nb', 'nn', 'oc', 'fa', 'pms', 'pl', 'pt', 'pa', 'ro', 'ru', 'sco', 'sr', 'scn', 'sk', 'sl', 'aze', 'es', 'su', 'sw', 'sv', 'tl', 'tg', 'th', 'ta', 'tt', 'te', 'tr', 'uk', 'ud', 'uz', 'vi', 'vo', 'war', 'cy', 'fry', 'pnb', 'yo', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:2103.06874', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
22,xlm-roberta-large,https://arxiv.org/pdf/1911.02116.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md,"{'likes': 206, 'downloads': 2860395, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'onnx', 'safetensors', 'xlm-roberta', 'fill-mask', 'exbert', 'multilingual', 'af', 'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sa', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'su', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'ug', 'uk', 'ur', 'uz', 'vi', 'xh', 'yi', 'zh', 'arxiv:1911.02116', 'license:mit', 'autotrain_compatible', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
23,microsoft/swin-base-patch4-window12-384-in22k,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 0, 'downloads': 1823, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-21k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
24,albert-base-v2,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 62, 'downloads': 5463983, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'rust', 'safetensors', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
25,microsoft/git-large,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 9, 'downloads': 2875, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-captioning', 'license:mit', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
26,vinvino02/glpn-nyu,https://arxiv.org/pdf/2201.07436.pdf,https://github.com/vinvino02/GLPDepth/blob/main/README.md,"{'likes': 10, 'downloads': 273929, 'tags': ['transformers', 'pytorch', 'glpn', 'depth-estimation', 'arxiv:2201.07436', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'depth-estimation', 'class': 'classA'}"
27,bert-base-cased,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 145, 'downloads': 5922632, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'exbert', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
28,google/bigbird-pegasus-large-arxiv,https://arxiv.org/pdf/2007.14062.pdf,https://github.com/google-research/bigbird/blob/master/README.md,"{'likes': 29, 'downloads': 7432, 'tags': ['transformers', 'pytorch', 'bigbird_pegasus', 'text2text-generation', 'en', 'dataset:scientific_papers', 'arxiv:2007.14062', 'summarization', 'license:apache-2.0', 'model-index', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'summarization', 'class': 'classA'}"
29,google/mobilenet_v2_0.75_160,https://arxiv.org/pdf/1801.04381.pdf,https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md,"{'likes': 1, 'downloads': 479, 'tags': ['transformers', 'pytorch', 'mobilenet_v2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1801.04381', 'vision', 'license:other', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
30,deepmind/multimodal-perceiver,https://arxiv.org/pdf/2107.14795.pdf,https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Perceiver/README.md,"{'likes': 13, 'downloads': 1412, 'tags': ['transformers', 'pytorch', 'perceiver', 'dataset:kinetics-700-2020', 'arxiv:2010.10864', 'arxiv:2107.14795', 'license:apache-2.0', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
31,facebook/opt-125m,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 76, 'downloads': 1157497, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
32,albert-xlarge-v2,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 3, 'downloads': 2982, 'tags': ['transformers', 'pytorch', 'tf', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
33,facebook/dinov2-large,https://arxiv.org/pdf/2304.07193.pdf,https://github.com/facebookresearch/dinov2/blob/main/README.md,"{'likes': 11, 'downloads': 30492, 'tags': ['transformers', 'pytorch', 'safetensors', 'dinov2', 'feature-extraction', 'arxiv:2304.07193', 'dino', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
34,google/vit-base-patch16-224,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 326, 'downloads': 553173, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'vit', 'image-classification', 'dataset:imagenet-1k', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
35,facebook/data2vec-vision-base,https://arxiv.org/pdf/2202.03555.pdf,https://github.com/facebookresearch/data2vec_vision/blob/main/beit/README.md,"{'likes': 2, 'downloads': 506, 'tags': ['transformers', 'pytorch', 'tf', 'data2vec-vision', 'feature-extraction', 'dataset:imagenet', 'dataset:imagenet-1k', 'arxiv:2202.03555', 'arxiv:2106.08254', 'image-classification', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
36,MCG-NJU/videomae-base,https://arxiv.org/pdf/2203.12602.pdf,https://github.com/MCG-NJU/VideoMAE/blob/main/README.md,"{'likes': 20, 'downloads': 26140, 'tags': ['transformers', 'pytorch', 'videomae', 'pretraining', 'arxiv:2203.12602', 'arxiv:2111.06377', 'vision', 'video-classification', 'license:cc-by-nc-4.0', 'has_space', 'region:us'], 'pipeline_tag': 'video-classification', 'class': 'classA'}"
37,facebook/mask2former-swin-large-ade-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 635, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
38,google/tapas-small-finetuned-sqa,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 0, 'downloads': 376, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:msr_sqa', 'arxiv:2004.02349', 'arxiv:2010.00571', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
39,facebook/opt-2.7b,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 45, 'downloads': 101608, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
40,nvidia/segformer-b1-finetuned-ade-512-512,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 0, 'downloads': 2770, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:scene_parse_150', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
41,microsoft/beit-large-patch16-224,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 0, 'downloads': 815, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
42,nvidia/segformer-b2-finetuned-cityscapes-1024-1024,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 0, 'downloads': 1883, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:cityscapes', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
43,ZinengTang/tvlt-base,https://arxiv.org/pdf/2209.14156.pdf,https://github.com/zinengtang/TVLT/blob/main/README.md,"{'likes': 1, 'downloads': 2011, 'tags': ['transformers', 'pytorch', 'tvlt', 'pretraining', 'arxiv:2209.14156', 'license:mit', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
44,google/tapas-base-finetuned-sqa,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 3, 'downloads': 4632, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:msr_sqa', 'arxiv:2004.02349', 'arxiv:2010.00571', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
45,microsoft/beit-base-patch16-384,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 4, 'downloads': 1430, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
46,microsoft/git-large-textcaps,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 15, 'downloads': 1723, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-captioning', 'license:mit', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
47,SCUT-DLVCLab/lilt-roberta-en-base,https://arxiv.org/pdf/2202.13669.pdf,https://github.com/jpWang/LiLT/blob/main/README.md,"{'likes': 13, 'downloads': 7283, 'tags': ['transformers', 'pytorch', 'safetensors', 'lilt', 'feature-extraction', 'arxiv:2202.13669', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
48,facebook/mask2former-swin-small-cityscapes-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 553, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
49,Salesforce/codet5-base,https://arxiv.org/pdf/2109.00859.pdf,https://github.com/salesforce/CodeT5/blob/main/README.md,"{'likes': 79, 'downloads': 916374, 'tags': ['transformers', 'pytorch', 't5', 'text2text-generation', 'dataset:code_search_net', 'arxiv:2109.00859', 'arxiv:1909.09436', 'codet5', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
50,microsoft/git-large-r-coco,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 3, 'downloads': 983, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-captioning', 'license:mit', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
51,Salesforce/blip2-flan-t5-xl,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 29, 'downloads': 61613, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'arxiv:2210.11416', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
52,dandelin/vilt-b32-finetuned-coco,https://arxiv.org/pdf/2102.03334.pdf,https://github.com/dandelin/ViLT/blob/master/README.md,"{'likes': 0, 'downloads': 406, 'tags': ['transformers', 'pytorch', 'vilt', 'arxiv:2102.03334', 'license:apache-2.0', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
53,facebook/bart-large,https://arxiv.org/pdf/1910.13461.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md,"{'likes': 115, 'downloads': 841206, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'rust', 'bart', 'feature-extraction', 'en', 'arxiv:1910.13461', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
54,microsoft/beit-base-patch16-224-pt22k,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 1, 'downloads': 15351, 'tags': ['transformers', 'pytorch', 'jax', 'safetensors', 'beit', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'image-classification', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
55,google/flan-t5-xl,https://arxiv.org/pdf/2210.11416.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 351, 'downloads': 212726, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 't5', 'text2text-generation', 'en', 'fr', 'ro', 'de', 'multilingual', 'dataset:svakulenk0/qrecc', 'dataset:taskmaster2', 'dataset:djaym7/wiki_dialog', 'dataset:deepmind/code_contests', 'dataset:lambada', 'dataset:gsm8k', 'dataset:aqua_rat', 'dataset:esnli', 'dataset:quasc', 'dataset:qed', 'arxiv:2210.11416', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
56,microsoft/git-base-textvqa,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 6, 'downloads': 910, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'license:mit', 'visual-question-answering', 'has_space', 'region:us'], 'pipeline_tag': 'visual-question-answering', 'class': 'classA'}"
57,funnel-transformer/small,https://arxiv.org/pdf/2006.03236.pdf,https://github.com/laiguokun/Funnel-Transformer/blob/master/README.md,"{'likes': 4, 'downloads': 4823, 'tags': ['transformers', 'pytorch', 'tf', 'funnel', 'feature-extraction', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'dataset:gigaword', 'arxiv:2006.03236', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
58,facebook/data2vec-vision-base-ft1k,https://arxiv.org/pdf/2202.03555.pdf,https://github.com/facebookresearch/data2vec_vision/blob/main/beit/README.md,"{'likes': 1, 'downloads': 2417, 'tags': ['transformers', 'pytorch', 'tf', 'data2vec-vision', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-1k', 'arxiv:2202.03555', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
59,facebook/dinov2-large-imagenet1k-1-layer,https://arxiv.org/pdf/2304.07193.pdf,https://github.com/facebookresearch/dinov2/blob/main/README.md,"{'likes': 0, 'downloads': 361, 'tags': ['transformers', 'pytorch', 'safetensors', 'dinov2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2304.07193', 'dino', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
60,hustvl/vitmatte-small-composition-1k,https://arxiv.org/pdf/2305.15272.pdf,https://github.com/hustvl/ViTMatte/blob/main/README.md,"{'likes': 0, 'downloads': 803, 'tags': ['transformers', 'pytorch', 'vitmatte', 'arxiv:2305.15272', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
61,google/mobilenet_v1_0.75_192,https://arxiv.org/pdf/1704.04861.pdf,https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md,"{'likes': 2, 'downloads': 121320, 'tags': ['transformers', 'pytorch', 'mobilenet_v1', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1704.04861', 'vision', 'license:other', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
62,SCUT-DLVCLab/lilt-infoxlm-base,https://arxiv.org/pdf/2202.13669.pdf,https://github.com/jpWang/LiLT/blob/main/README.md,"{'likes': 4, 'downloads': 837, 'tags': ['transformers', 'pytorch', 'safetensors', 'lilt', 'feature-extraction', 'arxiv:2202.13669', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
63,microsoft/trocr-base-printed,https://arxiv.org/pdf/2109.10282.pdf,https://github.com/microsoft/unilm/blob/master/trocr/README.md,"{'likes': 97, 'downloads': 62266, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2109.10282', 'trocr', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
64,facebook/mask2former-swin-tiny-ade-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 362, 'tags': ['transformers', 'pytorch', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
65,google/tapas-base-finetuned-wtq,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 148, 'downloads': 15907, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:wikitablequestions', 'arxiv:2004.02349', 'arxiv:2010.00571', 'arxiv:1508.00305', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
66,microsoft/git-base-coco,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 9, 'downloads': 5138, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-captioning', 'license:mit', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
67,facebook/mask2former-swin-base-coco-instance,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 1425, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
68,bert-large-uncased-whole-word-masking,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 10, 'downloads': 85320, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
69,google/efficientnet-b2,https://arxiv.org/pdf/1905.11946.pdf,https://github.com/keras-team/keras/blob/master/README.md,"{'likes': 0, 'downloads': 50563, 'tags': ['transformers', 'pytorch', 'efficientnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1905.11946', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
70,xlnet-large-cased,https://arxiv.org/pdf/1906.08237.pdf,https://github.com/zihangdai/xlnet/blob/master/README.md,"{'likes': 16, 'downloads': 15949, 'tags': ['transformers', 'pytorch', 'tf', 'xlnet', 'text-generation', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1906.08237', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
71,apple/deeplabv3-mobilevit-xx-small,https://arxiv.org/pdf/2110.02178.pdf,https://github.com/apple/ml-cvnets/blob/main/README.md,"{'likes': 6, 'downloads': 1093, 'tags': ['transformers', 'pytorch', 'tf', 'coreml', 'mobilevit', 'dataset:pascal-voc', 'arxiv:2110.02178', 'arxiv:1706.05587', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
72,naver-clova-ix/donut-base-finetuned-docvqa,https://arxiv.org/pdf/2111.15664.pdf,https://github.com/clovaai/donut/blob/master/README.md,"{'likes': 62, 'downloads': 15118, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2111.15664', 'donut', 'image-to-text', 'vision', 'license:mit', 'document-question-answering', 'has_space', 'region:us'], 'pipeline_tag': 'document-question-answering', 'class': 'classA'}"
73,google/tapas-large-finetuned-tabfact,https://arxiv.org/pdf/2010.00571.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 2, 'downloads': 1052, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'text-classification', 'en', 'dataset:tab_fact', 'arxiv:2010.00571', 'arxiv:2004.02349', 'sequence-classification', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'text-classification', 'class': 'classA'}"
74,microsoft/git-large-vatex,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 0, 'downloads': 471, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'license:mit', 'region:us', 'has_space'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
75,deepmind/language-perceiver,https://arxiv.org/pdf/2107.14795.pdf,https://github.com/google-deepmind/deepmind-research/blob/master/perceiver/README.md,"{'likes': 14, 'downloads': 1697, 'tags': ['transformers', 'pytorch', 'perceiver', 'fill-mask', 'en', 'dataset:wikipedia', 'dataset:c4', 'arxiv:1810.04805', 'arxiv:2107.14795', 'arxiv:2004.03720', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
76,microsoft/swin-large-patch4-window12-384-in22k,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 3, 'downloads': 80178, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-21k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
77,facebook/deformable-detr-detic,https://arxiv.org/pdf/2201.02605.pdf,https://github.com/facebookresearch/Detic/blob/main/README.md,"{'likes': 4, 'downloads': 744, 'tags': ['transformers', 'pytorch', 'deformable_detr', 'object-detection', 'dataset:coco', 'dataset:lvis', 'arxiv:2201.02605', 'arxiv:2010.04159', 'vision', 'detic', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
78,naver-clova-ix/donut-base-finetuned-zhtrainticket,https://arxiv.org/pdf/2111.15664.pdf,https://github.com/clovaai/donut/blob/master/README.md,"{'likes': 0, 'downloads': 495, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2111.15664', 'donut', 'image-to-text', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
79,facebook/mask2former-swin-large-coco-instance,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 2, 'downloads': 4265, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
80,microsoft/git-large-coco,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 68, 'downloads': 76563, 'tags': ['transformers', 'pytorch', 'safetensors', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-captioning', 'license:mit', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
81,nvidia/segformer-b3-finetuned-cityscapes-1024-1024,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 2, 'downloads': 3180, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:cityscapes', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
82,albert-xxlarge-v1,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 2, 'downloads': 2227, 'tags': ['transformers', 'pytorch', 'tf', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
83,facebook/deit-small-distilled-patch16-224,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 3, 'downloads': 1850, 'tags': ['transformers', 'pytorch', 'tf', 'deit', 'image-classification', 'dataset:imagenet', 'arxiv:2012.12877', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
84,facebook/dinov2-small,https://arxiv.org/pdf/2304.07193.pdf,https://github.com/facebookresearch/dinov2/blob/main/README.md,"{'likes': 1, 'downloads': 15998, 'tags': ['transformers', 'pytorch', 'safetensors', 'dinov2', 'feature-extraction', 'arxiv:2304.07193', 'dino', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
85,microsoft/speecht5_asr,https://arxiv.org/pdf/2110.07205.pdf,https://github.com/microsoft/SpeechT5/blob/main/README.md,"{'likes': 18, 'downloads': 3840, 'tags': ['transformers', 'pytorch', 'speecht5', 'automatic-speech-recognition', 'dataset:librispeech_asr', 'arxiv:2110.07205', 'audio', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
86,facebook/dino-vitb16,https://arxiv.org/pdf/2104.14294.pdf,https://github.com/facebookresearch/dino/blob/main/README.md,"{'likes': 89, 'downloads': 10138, 'tags': ['transformers', 'pytorch', 'tf', 'vit', 'feature-extraction', 'dataset:imagenet-1k', 'arxiv:2104.14294', 'dino', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
87,microsoft/table-transformer-structure-recognition,https://arxiv.org/pdf/2110.00061.pdf,https://github.com/microsoft/table-transformer/blob/main/README.md,"{'likes': 86, 'downloads': 134362, 'tags': ['transformers', 'pytorch', 'safetensors', 'table-transformer', 'object-detection', 'arxiv:2110.00061', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
88,jinhybr/OCR-DocVQA-Donut,https://arxiv.org/pdf/2111.15664.pdf,https://github.com/clovaai/donut/blob/master/README.md,"{'likes': 1, 'downloads': 413, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2111.15664', 'donut', 'image-to-text', 'vision', 'license:mit', 'document-question-answering', 'has_space', 'region:us'], 'pipeline_tag': 'document-question-answering', 'class': 'classA'}"
89,deepmind/optical-flow-perceiver,https://arxiv.org/pdf/2107.14795.pdf,https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Perceiver/README.md,"{'likes': 8, 'downloads': 4067, 'tags': ['transformers', 'pytorch', 'perceiver', 'dataset:autoflow', 'arxiv:2107.14795', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
90,openai/whisper-base.en,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 11, 'downloads': 79843, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
91,microsoft/xclip-large-patch14,https://arxiv.org/pdf/2208.02816.pdf,https://github.com/microsoft/VideoX/blob/master/X-CLIP/README.md,"{'likes': 0, 'downloads': 2363, 'tags': ['transformers', 'pytorch', 'xclip', 'feature-extraction', 'en', 'arxiv:2208.02816', 'vision', 'video-classification', 'license:mit', 'model-index', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
92,Salesforce/blip2-flan-t5-xl-coco,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 10, 'downloads': 5091, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'arxiv:2210.11416', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
93,paragon-AI/blip2-image-to-text,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 6, 'downloads': 431, 'tags': ['transformers', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
94,MCG-NJU/videomae-large-finetuned-kinetics,https://arxiv.org/pdf/2203.12602.pdf,https://github.com/MCG-NJU/VideoMAE/blob/main/README.md,"{'likes': 5, 'downloads': 486, 'tags': ['transformers', 'pytorch', 'videomae', 'video-classification', 'arxiv:2203.12602', 'arxiv:2111.06377', 'vision', 'license:cc-by-nc-4.0', 'has_space', 'region:us'], 'pipeline_tag': 'video-classification', 'class': 'classA'}"
95,facebook/convnextv2-tiny-1k-224,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 0, 'downloads': 101802, 'tags': ['transformers', 'pytorch', 'tf', 'convnextv2', 'image-classification', 'vision', 'dataset:imagenet-1k', 'arxiv:2301.00808', 'license:apache-2.0', 'autotrain_compatible', 'endpoints_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
96,facebook/deit-base-distilled-patch16-384,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 4, 'downloads': 2490, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'deit', 'image-classification', 'dataset:imagenet', 'arxiv:2012.12877', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
97,MCG-NJU/videomae-base-short,https://arxiv.org/pdf/2203.12602.pdf,https://github.com/MCG-NJU/VideoMAE/blob/main/README.md,"{'likes': 3, 'downloads': 466, 'tags': ['transformers', 'pytorch', 'videomae', 'pretraining', 'arxiv:2203.12602', 'arxiv:2111.06377', 'vision', 'video-classification', 'license:cc-by-nc-4.0', 'has_space', 'region:us'], 'pipeline_tag': 'video-classification', 'class': 'classA'}"
98,bert-large-cased,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 12, 'downloads': 96523, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
99,albert-base-v1,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 3, 'downloads': 48013, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'exbert', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
100,google/tapas-large-finetuned-wikisql-supervised,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 3, 'downloads': 334, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:wikisql', 'arxiv:2004.02349', 'arxiv:2010.00571', 'arxiv:1709.00103', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
101,philschmid/flan-t5-xxl-sharded-fp16,https://arxiv.org/pdf/2210.11416.pdf,https://github.com/google-research/t5x/blob/main/docs/models.md,"{'likes': 46, 'downloads': 161478, 'tags': ['transformers', 'pytorch', 't5', 'text2text-generation', 'arxiv:2210.11416', 'endpoints-template', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
102,facebook/mask2former-swin-base-IN21k-ade-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 2, 'downloads': 391, 'tags': ['transformers', 'pytorch', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
103,google/long-t5-local-base,https://arxiv.org/pdf/2112.07916.pdf,https://github.com/google-research/longt5/blob/master/README.md,"{'likes': 12, 'downloads': 7548, 'tags': ['transformers', 'pytorch', 'jax', 'safetensors', 'longt5', 'text2text-generation', 'en', 'arxiv:2112.07916', 'arxiv:1912.08777', 'arxiv:1910.10683', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
104,facebook/dinov2-giant,https://arxiv.org/pdf/2304.07193.pdf,https://github.com/facebookresearch/dinov2/blob/main/README.md,"{'likes': 10, 'downloads': 9376, 'tags': ['transformers', 'pytorch', 'safetensors', 'dinov2', 'feature-extraction', 'arxiv:2304.07193', 'dino', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
105,dandelin/vilt-b32-finetuned-nlvr2,https://arxiv.org/pdf/2102.03334.pdf,https://github.com/dandelin/ViLT/blob/master/README.md,"{'likes': 2, 'downloads': 3272, 'tags': ['transformers', 'pytorch', 'vilt', 'arxiv:2102.03334', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
106,naver-clova-ix/donut-base-finetuned-cord-v2,https://arxiv.org/pdf/2111.15664.pdf,https://github.com/clovaai/donut/blob/master/README.md,"{'likes': 41, 'downloads': 16733, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2111.15664', 'donut', 'image-to-text', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
107,nvidia/segformer-b0-finetuned-ade-512-512,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 78, 'downloads': 45860, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:scene_parse_150', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
108,hustvl/yolos-small-dwr,https://arxiv.org/pdf/2106.00666.pdf,https://github.com/hustvl/YOLOS/blob/main/README.md,"{'likes': 3, 'downloads': 868, 'tags': ['transformers', 'pytorch', 'yolos', 'object-detection', 'dataset:coco', 'arxiv:2106.00666', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
109,albert-xlarge-v1,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 0, 'downloads': 1017, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
110,hustvl/vitmatte-small-distinctions-646,https://arxiv.org/pdf/2305.15272.pdf,https://github.com/hustvl/ViTMatte/blob/main/README.md,"{'likes': 0, 'downloads': 499, 'tags': ['transformers', 'pytorch', 'vitmatte', 'arxiv:2305.15272', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
111,google/vit-large-patch16-224,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 11, 'downloads': 16572, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'image-classification', 'dataset:imagenet-1k', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
112,facebook/xlm-roberta-xxl,https://arxiv.org/pdf/2105.00572.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md,"{'likes': 8, 'downloads': 946, 'tags': ['transformers', 'pytorch', 'xlm-roberta-xl', 'fill-mask', 'multilingual', 'af', 'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sa', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'su', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'ug', 'uk', 'ur', 'uz', 'vi', 'xh', 'yi', 'zh', 'arxiv:2105.00572', 'license:mit', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
113,microsoft/git-base,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 23, 'downloads': 12714, 'tags': ['transformers', 'pytorch', 'safetensors', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
114,microsoft/dit-large-finetuned-rvlcdip,https://arxiv.org/pdf/2203.02378.pdf,https://github.com/microsoft/unilm/blob/master/dit/README.md,"{'likes': 4, 'downloads': 662, 'tags': ['transformers', 'pytorch', 'beit', 'image-classification', 'dataset:rvl_cdip', 'arxiv:2203.02378', 'dit', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
115,MCG-NJU/videomae-base-finetuned-kinetics,https://arxiv.org/pdf/2203.12602.pdf,https://github.com/MCG-NJU/VideoMAE/blob/main/README.md,"{'likes': 7, 'downloads': 196194, 'tags': ['transformers', 'pytorch', 'videomae', 'video-classification', 'arxiv:2203.12602', 'arxiv:2111.06377', 'vision', 'license:cc-by-nc-4.0', 'has_space', 'region:us'], 'pipeline_tag': 'video-classification', 'class': 'classA'}"
116,google/tapas-base-finetuned-tabfact,https://arxiv.org/pdf/2010.00571.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 1, 'downloads': 343, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'text-classification', 'en', 'dataset:tab_fact', 'arxiv:2010.00571', 'arxiv:2004.02349', 'sequence-classification', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'text-classification', 'class': 'classA'}"
117,facebook/opt-30b,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 132, 'downloads': 13050, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
118,hustvl/yolos-small-300,https://arxiv.org/pdf/2106.00666.pdf,https://github.com/hustvl/YOLOS/blob/main/README.md,"{'likes': 3, 'downloads': 845, 'tags': ['transformers', 'pytorch', 'safetensors', 'yolos', 'object-detection', 'dataset:coco', 'arxiv:2106.00666', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
119,microsoft/trocr-large-printed,https://arxiv.org/pdf/2109.10282.pdf,https://github.com/microsoft/unilm/blob/master/trocr/README.md,"{'likes': 32, 'downloads': 467334, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2109.10282', 'trocr', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
120,facebook/opt-6.7b,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/README.md,"{'likes': 73, 'downloads': 284592, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
121,facebook/convnextv2-nano-22k-384,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 1, 'downloads': 457, 'tags': ['transformers', 'pytorch', 'tf', 'convnextv2', 'image-classification', 'vision', 'dataset:imagenet-22k', 'arxiv:2301.00808', 'license:apache-2.0', 'autotrain_compatible', 'endpoints_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
122,bert-large-uncased,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 49, 'downloads': 876646, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
123,google/vit-large-patch32-224-in21k,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 0, 'downloads': 1828, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'feature-extraction', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
124,google/canine-s,https://arxiv.org/pdf/2103.06874.pdf,https://github.com/google-research/language/blob/master/language/canine/README.md,"{'likes': 12, 'downloads': 6437, 'tags': ['transformers', 'pytorch', 'canine', 'feature-extraction', 'multilingual', 'af', 'sq', 'ar', 'an', 'hy', 'ast', 'az', 'ba', 'eu', 'bar', 'be', 'bn', 'inc', 'bs', 'br', 'bg', 'my', 'ca', 'ceb', 'ce', 'zh', 'cv', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'gl', 'ka', 'de', 'el', 'gu', 'ht', 'he', 'hi', 'hu', 'is', 'io', 'id', 'ga', 'it', 'ja', 'jv', 'kn', 'kk', 'ky', 'ko', 'la', 'lv', 'lt', 'roa', 'nds', 'lm', 'mk', 'mg', 'ms', 'ml', 'mr', 'mn', 'min', 'ne', 'new', 'nb', 'nn', 'oc', 'fa', 'pms', 'pl', 'pt', 'pa', 'ro', 'ru', 'sco', 'sr', 'scn', 'sk', 'sl', 'aze', 'es', 'su', 'sw', 'sv', 'tl', 'tg', 'th', 'ta', 'tt', 'te', 'tr', 'uk', 'ud', 'uz', 'vi', 'vo', 'war', 'cy', 'fry', 'pnb', 'yo', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:2103.06874', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
125,microsoft/git-base-vqav2,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 2, 'downloads': 582, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'license:mit', 'visual-question-answering', 'has_space', 'region:us'], 'pipeline_tag': 'visual-question-answering', 'class': 'classA'}"
126,microsoft/trocr-large-handwritten,https://arxiv.org/pdf/2109.10282.pdf,https://github.com/microsoft/unilm/blob/master/trocr/README.md,"{'likes': 35, 'downloads': 11363, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2109.10282', 'trocr', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
127,google/long-t5-tglobal-base,https://arxiv.org/pdf/2112.07916.pdf,https://github.com/google-research/longt5/blob/master/README.md,"{'likes': 28, 'downloads': 25706, 'tags': ['transformers', 'pytorch', 'jax', 'longt5', 'text2text-generation', 'en', 'arxiv:2112.07916', 'arxiv:1912.08777', 'arxiv:1910.10683', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
128,microsoft/cvt-13,https://arxiv.org/pdf/2103.15808.pdf,https://github.com/microsoft/CvT/blob/main/README.md,"{'likes': 5, 'downloads': 6996, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'cvt', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.15808', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
129,facebook/mask2former-swin-large-coco-panoptic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 14, 'downloads': 5674, 'tags': ['transformers', 'pytorch', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
130,facebook/mask2former-swin-small-ade-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 2, 'downloads': 1585, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us', 'has_space'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
131,microsoft/trocr-base-stage1,https://arxiv.org/pdf/2109.10282.pdf,https://github.com/microsoft/unilm/blob/master/trocr/README.md,"{'likes': 6, 'downloads': 10536, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2109.10282', 'trocr', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
132,google/switch-base-16,https://arxiv.org/pdf/2101.03961.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 2, 'downloads': 914, 'tags': ['transformers', 'pytorch', 'switch_transformers', 'text2text-generation', 'en', 'dataset:c4', 'arxiv:2101.03961', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
133,MCG-NJU/videomae-huge-finetuned-kinetics,https://arxiv.org/pdf/2203.12602.pdf,https://github.com/MCG-NJU/VideoMAE/blob/main/README.md,"{'likes': 0, 'downloads': 1973, 'tags': ['transformers', 'pytorch', 'videomae', 'video-classification', 'arxiv:2203.12602', 'arxiv:2111.06377', 'vision', 'license:cc-by-nc-4.0', 'region:us', 'has_space'], 'pipeline_tag': 'video-classification', 'class': 'classA'}"
134,facebook/bart-large-cnn,https://arxiv.org/pdf/1910.13461.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md,"{'likes': 589, 'downloads': 1807856, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'rust', 'bart', 'text2text-generation', 'en', 'dataset:cnn_dailymail', 'arxiv:1910.13461', 'summarization', 'license:mit', 'model-index', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'summarization', 'class': 'classA'}"
135,SenseTime/deformable-detr-with-box-refine-two-stage,https://arxiv.org/pdf/2010.04159.pdf,https://github.com/fundamentalvision/Deformable-DETR/blob/main/README.md,"{'likes': 0, 'downloads': 357, 'tags': ['transformers', 'pytorch', 'safetensors', 'deformable_detr', 'object-detection', 'dataset:coco', 'arxiv:2010.04159', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
136,facebook/convnext-base-224-22k,https://arxiv.org/pdf/2201.03545.pdf,https://github.com/facebookresearch/ConvNeXt/blob/main/README.md,"{'likes': 1, 'downloads': 2193, 'tags': ['transformers', 'pytorch', 'tf', 'convnext', 'image-classification', 'dataset:imagenet-21k', 'arxiv:2201.03545', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
137,facebook/deit-base-patch16-384,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 1, 'downloads': 342, 'tags': ['transformers', 'pytorch', 'tf', 'vit', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2012.12877', 'arxiv:2006.03677', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
138,albert-large-v2,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 12, 'downloads': 11754, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
139,nvidia/segformer-b1-finetuned-cityscapes-1024-1024,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 7, 'downloads': 3123, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:cityscapes', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
140,Salesforce/instructblip-vicuna-7b,https://arxiv.org/pdf/2305.06500.pdf,https://github.com/lm-sys/FastChat/blob/main/README.md,"{'likes': 38, 'downloads': 87817, 'tags': ['transformers', 'pytorch', 'instructblip', 'text2text-generation', 'en', 'arxiv:2305.06500', 'vision', 'image-captioning', 'license:other', 'image-to-text', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
141,openai/whisper-small.en,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 14, 'downloads': 11455, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
142,microsoft/swinv2-base-patch4-window12-192-22k,https://arxiv.org/pdf/2111.09883.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 1, 'downloads': 1411, 'tags': ['transformers', 'pytorch', 'swinv2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2111.09883', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
143,microsoft/resnet-18,https://arxiv.org/pdf/1512.03385.pdf,https://github.com/KaimingHe/deep-residual-networks/blob/master/README.md,"{'likes': 21, 'downloads': 64781, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'resnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1512.03385', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
144,bert-large-uncased-whole-word-masking-finetuned-squad,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 94, 'downloads': 133976, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'question-answering', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'question-answering', 'class': 'classA'}"
145,facebook/mask2former-swin-large-mapillary-vistas-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 517, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
146,roberta-base,https://arxiv.org/pdf/1907.11692.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md,"{'likes': 219, 'downloads': 7289429, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'rust', 'safetensors', 'roberta', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1907.11692', 'arxiv:1806.02847', 'exbert', 'license:mit', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
147,microsoft/git-base-textcaps,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 4, 'downloads': 481, 'tags': ['transformers', 'pytorch', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'image-captioning', 'license:mit', 'image-to-text', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
148,microsoft/swin-large-patch4-window7-224-in22k,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 1, 'downloads': 935, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-21k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
149,microsoft/git-base-vatex,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 1, 'downloads': 990, 'tags': ['transformers', 'pytorch', 'safetensors', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
150,facebook/vit-mae-base,https://arxiv.org/pdf/2111.06377.pdf,https://github.com/facebookresearch/mae/blob/main/README.md,"{'likes': 14, 'downloads': 198937, 'tags': ['transformers', 'pytorch', 'tf', 'vit_mae', 'pretraining', 'dataset:imagenet-1k', 'arxiv:2111.06377', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
151,google/deeplabv3_mobilenet_v2_1.0_513,https://arxiv.org/pdf/1801.04381.pdf,https://github.com/tensorflow/models/blob/master/research/deeplab/README.md,"{'likes': 0, 'downloads': 589, 'tags': ['transformers', 'pytorch', 'mobilenet_v2', 'dataset:pascal-voc', 'arxiv:1801.04381', 'arxiv:1802.02611', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
152,vinvino02/glpn-kitti,https://arxiv.org/pdf/2201.07436.pdf,https://github.com/vinvino02/GLPDepth/blob/main/README.md,"{'likes': 4, 'downloads': 3534, 'tags': ['transformers', 'pytorch', 'glpn', 'depth-estimation', 'arxiv:2201.07436', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'depth-estimation', 'class': 'classA'}"
153,google/bigbird-roberta-large,https://arxiv.org/pdf/2007.14062.pdf,https://github.com/google-research/bigbird/blob/master/README.md,"{'likes': 19, 'downloads': 997, 'tags': ['transformers', 'pytorch', 'jax', 'big_bird', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'dataset:cc_news', 'arxiv:2007.14062', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
154,microsoft/beit-large-patch16-224-pt22k-ft22k,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 4, 'downloads': 2182, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
155,facebook/convnext-tiny-224,https://arxiv.org/pdf/2201.03545.pdf,https://github.com/facebookresearch/ConvNeXt/blob/main/README.md,"{'likes': 9, 'downloads': 14410, 'tags': ['transformers', 'pytorch', 'tf', 'convnext', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2201.03545', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
156,MIT/ast-finetuned-audioset-16-16-0.442,https://arxiv.org/pdf/2104.01778.pdf,https://github.com/YuanGongND/ast/blob/master/README.md,"{'likes': 0, 'downloads': 475, 'tags': ['transformers', 'pytorch', 'safetensors', 'audio-spectrogram-transformer', 'audio-classification', 'arxiv:2104.01778', 'license:bsd-3-clause', 'region:us'], 'pipeline_tag': 'audio-classification', 'class': 'classA'}"
157,hustvl/yolos-base,https://arxiv.org/pdf/2106.00666.pdf,https://github.com/hustvl/YOLOS/blob/main/README.md,"{'likes': 12, 'downloads': 2374, 'tags': ['transformers', 'pytorch', 'yolos', 'object-detection', 'dataset:coco', 'arxiv:2106.00666', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
158,facebook/vit-mae-huge,https://arxiv.org/pdf/2111.06377.pdf,https://github.com/facebookresearch/mae/blob/main/README.md,"{'likes': 3, 'downloads': 475, 'tags': ['transformers', 'pytorch', 'tf', 'vit_mae', 'pretraining', 'dataset:imagenet-1k', 'arxiv:2111.06377', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
159,microsoft/table-transformer-detection,https://arxiv.org/pdf/2110.00061.pdf,https://github.com/microsoft/table-transformer/blob/main/README.md,"{'likes': 66, 'downloads': 236915, 'tags': ['transformers', 'pytorch', 'safetensors', 'table-transformer', 'object-detection', 'arxiv:2110.00061', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
160,nvidia/segformer-b5-finetuned-cityscapes-1024-1024,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 14, 'downloads': 8386, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:cityscapes', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
161,facebook/nougat-base,https://arxiv.org/pdf/2308.13418.pdf,https://github.com/facebookresearch/nougat/blob/main/README.md,"{'likes': 20, 'downloads': 1486, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'vision', 'nougat', 'image-to-text', 'arxiv:2308.13418', 'license:apache-2.0', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
162,nvidia/segformer-b3-finetuned-ade-512-512,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 5, 'downloads': 4660, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:scene_parse_150', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
163,google/vit-base-patch32-224-in21k,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 17, 'downloads': 3381, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'vit', 'feature-extraction', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
164,google/switch-base-128,https://arxiv.org/pdf/2101.03961.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 2, 'downloads': 2379, 'tags': ['transformers', 'pytorch', 'switch_transformers', 'text2text-generation', 'en', 'dataset:c4', 'arxiv:2101.03961', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
165,Mediocreatmybest/blip2-opt-2.7b_8bit,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 1, 'downloads': 320, 'tags': ['transformers', 'pytorch', 'safetensors', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', '8-bit', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
166,microsoft/dit-base,https://arxiv.org/pdf/2203.02378.pdf,https://github.com/microsoft/unilm/blob/master/dit/README.md,"{'likes': 9, 'downloads': 28325, 'tags': ['transformers', 'pytorch', 'beit', 'arxiv:2203.02378', 'dit', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
167,google/tapas-base,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 6, 'downloads': 1419, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'feature-extraction', 'en', 'arxiv:2004.02349', 'arxiv:2010.00571', 'TapasModel', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
168,deepmind/vision-perceiver-fourier,https://arxiv.org/pdf/2107.14795.pdf,https://github.com/google-deepmind/deepmind-research/blob/master/perceiver/README.md,"{'likes': 2, 'downloads': 537, 'tags': ['transformers', 'pytorch', 'perceiver', 'image-classification', 'dataset:imagenet', 'arxiv:2107.14795', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
169,google/vit-base-patch16-384,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 17, 'downloads': 362275, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'vit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
170,Intel/dpt-large-ade,https://arxiv.org/pdf/2103.13413.pdf,https://github.com/isl-org/DPT/blob/main/README.md,"{'likes': 4, 'downloads': 1169, 'tags': ['transformers', 'pytorch', 'dpt', 'dataset:scene_parse_150', 'arxiv:2103.13413', 'vision', 'image-segmentation', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
171,google/bigbird-pegasus-large-bigpatent,https://arxiv.org/pdf/2007.14062.pdf,https://github.com/google-research/bigbird/blob/master/README.md,"{'likes': 27, 'downloads': 6130, 'tags': ['transformers', 'pytorch', 'bigbird_pegasus', 'text2text-generation', 'en', 'dataset:big_patent', 'arxiv:2007.14062', 'summarization', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'summarization', 'class': 'classA'}"
172,google/switch-base-256,https://arxiv.org/pdf/2101.03961.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 2, 'downloads': 1192, 'tags': ['transformers', 'pytorch', 'switch_transformers', 'text2text-generation', 'en', 'dataset:c4', 'arxiv:2101.03961', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
173,Salesforce/blip2-opt-6.7b,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 48, 'downloads': 2793, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
174,microsoft/swin-tiny-patch4-window7-224,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 17, 'downloads': 13561, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'swin', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
175,google/mobilenet_v1_1.0_224,https://arxiv.org/pdf/1704.04861.pdf,https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md,"{'likes': 0, 'downloads': 3259, 'tags': ['transformers', 'pytorch', 'mobilenet_v1', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1704.04861', 'vision', 'license:other', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
176,nvidia/segformer-b0-finetuned-cityscapes-512-1024,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 0, 'downloads': 936, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:cityscapes', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
177,dandelin/vilt-b32-finetuned-vqa,https://arxiv.org/pdf/2102.03334.pdf,https://github.com/dandelin/ViLT/blob/master/README.md,"{'likes': 252, 'downloads': 77918, 'tags': ['transformers', 'pytorch', 'vilt', 'arxiv:2102.03334', 'visual-question-answering', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'visual-question-answering', 'class': 'classA'}"
178,facebook/detr-resnet-50,https://arxiv.org/pdf/2005.12872.pdf,https://github.com/facebookresearch/detr/blob/main/README.md,"{'likes': 278, 'downloads': 610855, 'tags': ['transformers', 'pytorch', 'detr', 'object-detection', 'vision', 'dataset:coco', 'arxiv:2005.12872', 'license:apache-2.0', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
179,google/flan-t5-base,https://arxiv.org/pdf/2210.11416.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 350, 'downloads': 5470515, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 't5', 'text2text-generation', 'en', 'fr', 'ro', 'de', 'multilingual', 'dataset:svakulenk0/qrecc', 'dataset:taskmaster2', 'dataset:djaym7/wiki_dialog', 'dataset:deepmind/code_contests', 'dataset:lambada', 'dataset:gsm8k', 'dataset:aqua_rat', 'dataset:esnli', 'dataset:quasc', 'dataset:qed', 'arxiv:2210.11416', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
180,google/flan-t5-xxl,https://arxiv.org/pdf/2210.11416.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 916, 'downloads': 304315, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 't5', 'text2text-generation', 'en', 'fr', 'ro', 'de', 'multilingual', 'dataset:svakulenk0/qrecc', 'dataset:taskmaster2', 'dataset:djaym7/wiki_dialog', 'dataset:deepmind/code_contests', 'dataset:lambada', 'dataset:gsm8k', 'dataset:aqua_rat', 'dataset:esnli', 'dataset:quasc', 'dataset:qed', 'arxiv:2210.11416', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
181,microsoft/beit-base-patch16-224,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 5, 'downloads': 18609, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
182,openai/whisper-tiny.en,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 59, 'downloads': 168766, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
183,microsoft/beit-large-patch16-384,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 0, 'downloads': 648, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
184,facebook/deit-small-patch16-224,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 2, 'downloads': 3577, 'tags': ['transformers', 'pytorch', 'tf', 'vit', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2012.12877', 'arxiv:2006.03677', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
185,bert-base-uncased,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 1114, 'downloads': 46645244, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'rust', 'coreml', 'onnx', 'safetensors', 'bert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'exbert', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
186,google/tapas-large-finetuned-wtq,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 65, 'downloads': 80389, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'tapas', 'table-question-answering', 'en', 'dataset:wikitablequestions', 'arxiv:2004.02349', 'arxiv:2010.00571', 'arxiv:1508.00305', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
187,hustvl/yolos-tiny,https://arxiv.org/pdf/2106.00666.pdf,https://github.com/hustvl/YOLOS/blob/main/README.md,"{'likes': 141, 'downloads': 281769, 'tags': ['transformers', 'pytorch', 'safetensors', 'yolos', 'object-detection', 'dataset:coco', 'arxiv:2106.00666', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
188,facebook/dino-vits16,https://arxiv.org/pdf/2104.14294.pdf,https://github.com/facebookresearch/dino/blob/main/README.md,"{'likes': 10, 'downloads': 9455, 'tags': ['transformers', 'pytorch', 'vit', 'feature-extraction', 'dataset:imagenet-1k', 'arxiv:2104.14294', 'dino', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
189,facebook/deit-tiny-distilled-patch16-224,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 3, 'downloads': 5320, 'tags': ['transformers', 'pytorch', 'tf', 'deit', 'image-classification', 'dataset:imagenet', 'arxiv:2012.12877', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
190,microsoft/swinv2-large-patch4-window12to24-192to384-22kto1k-ft,https://arxiv.org/pdf/2111.09883.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 0, 'downloads': 2077, 'tags': ['transformers', 'pytorch', 'swinv2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2111.09883', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
191,microsoft/swinv2-large-patch4-window12-192-22k,https://arxiv.org/pdf/2111.09883.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 6, 'downloads': 1187, 'tags': ['transformers', 'pytorch', 'swinv2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2111.09883', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
192,openai/whisper-large-v2,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 1122, 'downloads': 156393, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'zh', 'de', 'es', 'ru', 'ko', 'fr', 'ja', 'pt', 'tr', 'pl', 'ca', 'nl', 'ar', 'sv', 'it', 'id', 'hi', 'fi', 'vi', 'he', 'uk', 'el', 'ms', 'cs', 'ro', 'da', 'hu', 'ta', 'no', 'th', 'ur', 'hr', 'bg', 'lt', 'la', 'mi', 'ml', 'cy', 'sk', 'te', 'fa', 'lv', 'bn', 'sr', 'az', 'sl', 'kn', 'et', 'mk', 'br', 'eu', 'is', 'hy', 'ne', 'mn', 'bs', 'kk', 'sq', 'sw', 'gl', 'mr', 'pa', 'si', 'km', 'sn', 'yo', 'so', 'af', 'oc', 'ka', 'be', 'tg', 'sd', 'gu', 'am', 'yi', 'lo', 'uz', 'fo', 'ht', 'ps', 'tk', 'nn', 'mt', 'sa', 'lb', 'my', 'bo', 'tl', 'mg', 'as', 'tt', 'haw', 'ln', 'ha', 'ba', 'jw', 'su', 'arxiv:2212.04356', 'license:apache-2.0', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
193,deepmind/vision-perceiver-learned,https://arxiv.org/pdf/2107.14795.pdf,https://github.com/google-deepmind/deepmind-research/blob/master/perceiver/README.md,"{'likes': 10, 'downloads': 651, 'tags': ['transformers', 'pytorch', 'perceiver', 'image-classification', 'dataset:imagenet', 'arxiv:2107.14795', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
194,microsoft/trocr-base-handwritten,https://arxiv.org/pdf/2109.10282.pdf,https://github.com/microsoft/unilm/blob/master/trocr/README.md,"{'likes': 93, 'downloads': 297289, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2109.10282', 'trocr', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
195,naver-clova-ix/donut-base-finetuned-rvlcdip,https://arxiv.org/pdf/2111.15664.pdf,https://github.com/clovaai/donut/blob/master/README.md,"{'likes': 5, 'downloads': 2628, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2111.15664', 'donut', 'image-to-text', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
196,facebook/regnet-y-040,https://arxiv.org/pdf/2003.13678.pdf,https://github.com/facebookresearch/pycls/blob/main/README.md,"{'likes': 1, 'downloads': 1668, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'regnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2003.13678', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
197,google/flan-t5-large,https://arxiv.org/pdf/2210.11416.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 262, 'downloads': 2574176, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 't5', 'text2text-generation', 'en', 'fr', 'ro', 'de', 'multilingual', 'dataset:svakulenk0/qrecc', 'dataset:taskmaster2', 'dataset:djaym7/wiki_dialog', 'dataset:deepmind/code_contests', 'dataset:lambada', 'dataset:gsm8k', 'dataset:aqua_rat', 'dataset:esnli', 'dataset:quasc', 'dataset:qed', 'arxiv:2210.11416', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
198,xlm-roberta-base,https://arxiv.org/pdf/1911.02116.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md,"{'likes': 386, 'downloads': 12342254, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'onnx', 'safetensors', 'xlm-roberta', 'fill-mask', 'multilingual', 'af', 'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sa', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'su', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'ug', 'uk', 'ur', 'uz', 'vi', 'xh', 'yi', 'zh', 'arxiv:1911.02116', 'exbert', 'license:mit', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
199,xlnet-base-cased,https://arxiv.org/pdf/1906.08237.pdf,https://github.com/zihangdai/xlnet/blob/master/README.md,"{'likes': 46, 'downloads': 224992, 'tags': ['transformers', 'pytorch', 'tf', 'rust', 'xlnet', 'text-generation', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1906.08237', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
200,google/roberta2roberta_L-24_wikisplit,https://arxiv.org/pdf/1907.12461.pdf,https://github.com/google-research-datasets/wiki-split/blob/master/README.md,"{'likes': 7, 'downloads': 509, 'tags': ['transformers', 'pytorch', 'encoder-decoder', 'text2text-generation', 'en', 'arxiv:1907.12461', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
201,microsoft/beit-large-patch16-224-pt22k,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 1, 'downloads': 758, 'tags': ['transformers', 'pytorch', 'jax', 'safetensors', 'beit', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'image-classification', 'vision', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
202,google/switch-base-8,https://arxiv.org/pdf/2101.03961.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 9, 'downloads': 7842, 'tags': ['transformers', 'pytorch', 'switch_transformers', 'text2text-generation', 'en', 'dataset:c4', 'arxiv:2101.03961', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
203,google/vit-base-patch32-384,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 12, 'downloads': 3457, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'vit', 'image-classification', 'dataset:imagenet-1k', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
204,microsoft/swin-large-patch4-window12-384,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 1, 'downloads': 820, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
205,facebook/sam-vit-large,https://arxiv.org/pdf/2304.02643.pdf,https://github.com/facebookresearch/segment-anything/blob/main/README.md,"{'likes': 11, 'downloads': 74748, 'tags': ['transformers', 'pytorch', 'tf', 'sam', 'mask-generation', 'license:apache-2.0', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
206,facebook/dino-vits8,https://arxiv.org/pdf/2104.14294.pdf,https://github.com/facebookresearch/dino/blob/main/README.md,"{'likes': 8, 'downloads': 6207, 'tags': ['transformers', 'pytorch', 'vit', 'feature-extraction', 'dataset:imagenet-1k', 'arxiv:2104.14294', 'dino', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
207,MIT/ast-finetuned-audioset-10-10-0.4593,https://arxiv.org/pdf/2104.01778.pdf,https://github.com/YuanGongND/ast/blob/master/README.md,"{'likes': 65, 'downloads': 42115, 'tags': ['transformers', 'pytorch', 'safetensors', 'audio-spectrogram-transformer', 'audio-classification', 'arxiv:2104.01778', 'license:bsd-3-clause', 'has_space', 'region:us'], 'pipeline_tag': 'audio-classification', 'class': 'classA'}"
208,openai/whisper-medium,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 116, 'downloads': 98784, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'zh', 'de', 'es', 'ru', 'ko', 'fr', 'ja', 'pt', 'tr', 'pl', 'ca', 'nl', 'ar', 'sv', 'it', 'id', 'hi', 'fi', 'vi', 'he', 'uk', 'el', 'ms', 'cs', 'ro', 'da', 'hu', 'ta', 'no', 'th', 'ur', 'hr', 'bg', 'lt', 'la', 'mi', 'ml', 'cy', 'sk', 'te', 'fa', 'lv', 'bn', 'sr', 'az', 'sl', 'kn', 'et', 'mk', 'br', 'eu', 'is', 'hy', 'ne', 'mn', 'bs', 'kk', 'sq', 'sw', 'gl', 'mr', 'pa', 'si', 'km', 'sn', 'yo', 'so', 'af', 'oc', 'ka', 'be', 'tg', 'sd', 'gu', 'am', 'yi', 'lo', 'uz', 'fo', 'ht', 'ps', 'tk', 'nn', 'mt', 'sa', 'lb', 'my', 'bo', 'tl', 'mg', 'as', 'tt', 'haw', 'ln', 'ha', 'ba', 'jw', 'su', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
209,google/switch-base-64,https://arxiv.org/pdf/2101.03961.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 2, 'downloads': 2663, 'tags': ['transformers', 'pytorch', 'switch_transformers', 'text2text-generation', 'en', 'dataset:c4', 'arxiv:2101.03961', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
210,microsoft/dit-large,https://arxiv.org/pdf/2203.02378.pdf,https://github.com/microsoft/unilm/blob/master/dit/README.md,"{'likes': 7, 'downloads': 3488, 'tags': ['transformers', 'pytorch', 'beit', 'arxiv:2203.02378', 'dit', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
211,openai/whisper-small,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 91, 'downloads': 100202, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'zh', 'de', 'es', 'ru', 'ko', 'fr', 'ja', 'pt', 'tr', 'pl', 'ca', 'nl', 'ar', 'sv', 'it', 'id', 'hi', 'fi', 'vi', 'he', 'uk', 'el', 'ms', 'cs', 'ro', 'da', 'hu', 'ta', 'no', 'th', 'ur', 'hr', 'bg', 'lt', 'la', 'mi', 'ml', 'cy', 'sk', 'te', 'fa', 'lv', 'bn', 'sr', 'az', 'sl', 'kn', 'et', 'mk', 'br', 'eu', 'is', 'hy', 'ne', 'mn', 'bs', 'kk', 'sq', 'sw', 'gl', 'mr', 'pa', 'si', 'km', 'sn', 'yo', 'so', 'af', 'oc', 'ka', 'be', 'tg', 'sd', 'gu', 'am', 'yi', 'lo', 'uz', 'fo', 'ht', 'ps', 'tk', 'nn', 'mt', 'sa', 'lb', 'my', 'bo', 'tl', 'mg', 'as', 'tt', 'haw', 'ln', 'ha', 'ba', 'jw', 'su', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
212,facebook/mask2former-swin-large-mapillary-vistas-panoptic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 1, 'downloads': 1703, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
213,microsoft/swin-small-patch4-window7-224,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 0, 'downloads': 1065, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
214,facebook/mask2former-swin-large-cityscapes-semantic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 6, 'downloads': 16734, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
215,microsoft/swin-base-patch4-window7-224,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 3, 'downloads': 8574, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'swin', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
216,facebook/opt-66b,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 171, 'downloads': 9635, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
217,facebook/convnext-base-224,https://arxiv.org/pdf/2201.03545.pdf,https://github.com/facebookresearch/ConvNeXt/blob/main/README.md,"{'likes': 7, 'downloads': 337, 'tags': ['transformers', 'pytorch', 'tf', 'convnext', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2201.03545', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
218,google/vit-large-patch32-384,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 6, 'downloads': 6482, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
219,albert-large-v1,https://arxiv.org/pdf/1909.11942.pdf,https://github.com/google-research/albert/blob/master/README.md,"{'likes': 0, 'downloads': 1140, 'tags': ['transformers', 'pytorch', 'tf', 'albert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1909.11942', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
220,google/tapas-base-finetuned-wikisql-supervised,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 5, 'downloads': 643, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:wikisql', 'arxiv:2004.02349', 'arxiv:2010.00571', 'arxiv:1709.00103', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
221,facebook/dino-vitb8,https://arxiv.org/pdf/2104.14294.pdf,https://github.com/facebookresearch/dino/blob/main/README.md,"{'likes': 8, 'downloads': 6598, 'tags': ['transformers', 'pytorch', 'vit', 'feature-extraction', 'dataset:imagenet-1k', 'arxiv:2104.14294', 'dino', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
222,facebook/deit-base-distilled-patch16-224,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 20, 'downloads': 15652, 'tags': ['transformers', 'pytorch', 'tf', 'deit', 'image-classification', 'dataset:imagenet', 'arxiv:2012.12877', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
223,bert-large-cased-whole-word-masking,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 4, 'downloads': 1305, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'bert', 'fill-mask', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
224,google/efficientnet-b0,https://arxiv.org/pdf/1905.11946.pdf,https://github.com/keras-team/keras/blob/master/README.md,"{'likes': 3, 'downloads': 4366, 'tags': ['transformers', 'pytorch', 'efficientnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1905.11946', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
225,bert-base-multilingual-cased,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/multilingual.md,"{'likes': 232, 'downloads': 2767088, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'fill-mask', 'multilingual', 'af', 'sq', 'ar', 'an', 'hy', 'ast', 'az', 'ba', 'eu', 'bar', 'be', 'bn', 'inc', 'bs', 'br', 'bg', 'my', 'ca', 'ceb', 'ce', 'zh', 'cv', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'gl', 'ka', 'de', 'el', 'gu', 'ht', 'he', 'hi', 'hu', 'is', 'io', 'id', 'ga', 'it', 'ja', 'jv', 'kn', 'kk', 'ky', 'ko', 'la', 'lv', 'lt', 'roa', 'nds', 'lm', 'mk', 'mg', 'ms', 'ml', 'mr', 'mn', 'min', 'ne', 'new', 'nb', 'nn', 'oc', 'fa', 'pms', 'pl', 'pt', 'pa', 'ro', 'ru', 'sco', 'sr', 'scn', 'sk', 'sl', 'aze', 'es', 'su', 'sw', 'sv', 'tl', 'tg', 'th', 'ta', 'tt', 'te', 'tr', 'uk', 'ud', 'uz', 'vi', 'vo', 'war', 'cy', 'fry', 'pnb', 'yo', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
226,google/tapas-tiny-finetuned-wtq,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 0, 'downloads': 13320, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'table-question-answering', 'en', 'dataset:wtq', 'arxiv:2004.02349', 'arxiv:2010.00571', 'arxiv:1508.00305', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'table-question-answering', 'class': 'classA'}"
227,facebook/mask2former-swin-tiny-coco-instance,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 2, 'downloads': 6118, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
228,google/tapas-large,https://arxiv.org/pdf/2004.02349.pdf,https://github.com/google-research/tapas/blob/master/README.md,"{'likes': 0, 'downloads': 984, 'tags': ['transformers', 'pytorch', 'tf', 'tapas', 'feature-extraction', 'en', 'arxiv:2004.02349', 'arxiv:2010.00571', 'TapasModel', 'license:apache-2.0', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
229,nvidia/segformer-b2-finetuned-ade-512-512,https://arxiv.org/pdf/2105.15203.pdf,https://github.com/NVlabs/SegFormer/blob/master/README.md,"{'likes': 1, 'downloads': 8286, 'tags': ['transformers', 'pytorch', 'tf', 'segformer', 'dataset:scene_parse_150', 'arxiv:2105.15203', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
230,facebook/data2vec-text-base,https://arxiv.org/pdf/2202.03555.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/data2vec/README.md,"{'likes': 12, 'downloads': 1057, 'tags': ['transformers', 'pytorch', 'data2vec-text', 'feature-extraction', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:2202.03555', 'arxiv:1806.02847', 'exbert', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
231,Salesforce/instructblip-vicuna-13b,https://arxiv.org/pdf/2305.06500.pdf,https://github.com/lm-sys/FastChat/blob/main/README.md,"{'likes': 32, 'downloads': 8027, 'tags': ['transformers', 'pytorch', 'instructblip', 'text2text-generation', 'en', 'arxiv:2305.06500', 'vision', 'image-captioning', 'license:other', 'image-to-text', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
232,facebook/deit-tiny-patch16-224,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 1, 'downloads': 8132, 'tags': ['transformers', 'pytorch', 'tf', 'vit', 'image-classification', 'dataset:imagenet', 'arxiv:2012.12877', 'arxiv:2006.03677', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
233,microsoft/swin-large-patch4-window7-224,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 0, 'downloads': 1020, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
234,microsoft/swin-base-patch4-window12-384,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 3, 'downloads': 2949, 'tags': ['transformers', 'pytorch', 'tf', 'swin', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
235,bert-base-multilingual-uncased,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/multilingual.md,"{'likes': 56, 'downloads': 533041, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bert', 'fill-mask', 'multilingual', 'af', 'sq', 'ar', 'an', 'hy', 'ast', 'az', 'ba', 'eu', 'bar', 'be', 'bn', 'inc', 'bs', 'br', 'bg', 'my', 'ca', 'ceb', 'ce', 'zh', 'cv', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'gl', 'ka', 'de', 'el', 'gu', 'ht', 'he', 'hi', 'hu', 'is', 'io', 'id', 'ga', 'it', 'ja', 'jv', 'kn', 'kk', 'ky', 'ko', 'la', 'lv', 'lt', 'roa', 'nds', 'lm', 'mk', 'mg', 'ms', 'ml', 'mr', 'min', 'ne', 'new', 'nb', 'nn', 'oc', 'fa', 'pms', 'pl', 'pt', 'pa', 'ro', 'ru', 'sco', 'sr', 'scn', 'sk', 'sl', 'aze', 'es', 'su', 'sw', 'sv', 'tl', 'tg', 'ta', 'tt', 'te', 'tr', 'uk', 'ud', 'uz', 'vi', 'vo', 'war', 'cy', 'fry', 'pnb', 'yo', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
236,facebook/vit-mae-large,https://arxiv.org/pdf/2111.06377.pdf,https://github.com/facebookresearch/mae/blob/main/README.md,"{'likes': 4, 'downloads': 3426, 'tags': ['transformers', 'pytorch', 'tf', 'vit_mae', 'pretraining', 'dataset:imagenet-1k', 'arxiv:2111.06377', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
237,facebook/convnext-xlarge-224-22k,https://arxiv.org/pdf/2201.03545.pdf,https://github.com/facebookresearch/ConvNeXt/blob/main/README.md,"{'likes': 0, 'downloads': 1658, 'tags': ['transformers', 'pytorch', 'tf', 'convnext', 'image-classification', 'dataset:imagenet-21k', 'arxiv:2201.03545', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
238,openai/whisper-tiny,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 106, 'downloads': 154790, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'zh', 'de', 'es', 'ru', 'ko', 'fr', 'ja', 'pt', 'tr', 'pl', 'ca', 'nl', 'ar', 'sv', 'it', 'id', 'hi', 'fi', 'vi', 'he', 'uk', 'el', 'ms', 'cs', 'ro', 'da', 'hu', 'ta', 'no', 'th', 'ur', 'hr', 'bg', 'lt', 'la', 'mi', 'ml', 'cy', 'sk', 'te', 'fa', 'lv', 'bn', 'sr', 'az', 'sl', 'kn', 'et', 'mk', 'br', 'eu', 'is', 'hy', 'ne', 'mn', 'bs', 'kk', 'sq', 'sw', 'gl', 'mr', 'pa', 'si', 'km', 'sn', 'yo', 'so', 'af', 'oc', 'ka', 'be', 'tg', 'sd', 'gu', 'am', 'yi', 'lo', 'uz', 'fo', 'ht', 'ps', 'tk', 'nn', 'mt', 'sa', 'lb', 'my', 'bo', 'tl', 'mg', 'as', 'tt', 'haw', 'ln', 'ha', 'ba', 'jw', 'su', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
239,google/bigbird-roberta-base,https://arxiv.org/pdf/2007.14062.pdf,https://github.com/google-research/bigbird/blob/master/README.md,"{'likes': 38, 'downloads': 20012, 'tags': ['transformers', 'pytorch', 'jax', 'big_bird', 'pretraining', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'dataset:cc_news', 'arxiv:2007.14062', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
240,microsoft/focalnet-tiny,https://arxiv.org/pdf/2203.11926.pdf,https://github.com/microsoft/FocalNet/blob/main/README.md,"{'likes': 0, 'downloads': 514, 'tags': ['transformers', 'pytorch', 'focalnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2203.11926', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
241,deepmind/vision-perceiver-conv,https://arxiv.org/pdf/2107.14795.pdf,https://github.com/google-deepmind/deepmind-research/blob/master/perceiver/README.md,"{'likes': 5, 'downloads': 927, 'tags': ['transformers', 'pytorch', 'perceiver', 'image-classification', 'dataset:imagenet', 'arxiv:2107.14795', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
242,Salesforce/blip2-opt-2.7b-coco,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 4, 'downloads': 6639, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
243,microsoft/speecht5_tts,https://arxiv.org/pdf/2110.07205.pdf,https://github.com/microsoft/SpeechT5/blob/main/README.md,"{'likes': 270, 'downloads': 53846, 'tags': ['transformers', 'pytorch', 'speecht5', 'dataset:libritts', 'arxiv:2110.07205', 'arxiv:1910.09700', 'audio', 'text-to-speech', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'text-to-speech', 'class': 'classA'}"
244,microsoft/git-large-vqav2,https://arxiv.org/pdf/2205.14100.pdf,https://github.com/microsoft/GenerativeImage2Text/blob/main/README.md,"{'likes': 8, 'downloads': 1123, 'tags': ['transformers', 'pytorch', 'safetensors', 'git', 'text-generation', 'en', 'arxiv:2205.14100', 'vision', 'license:mit', 'visual-question-answering', 'has_space', 'region:us'], 'pipeline_tag': 'visual-question-answering', 'class': 'classA'}"
245,facebook/bart-base,https://arxiv.org/pdf/1910.13461.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md,"{'likes': 103, 'downloads': 611585, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'bart', 'feature-extraction', 'en', 'arxiv:1910.13461', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
246,google/mobilenet_v2_0.35_96,https://arxiv.org/pdf/1801.04381.pdf,https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md,"{'likes': 0, 'downloads': 404, 'tags': ['transformers', 'pytorch', 'mobilenet_v2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1801.04381', 'vision', 'license:other', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
247,Salesforce/blip2-opt-6.7b-coco,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 16, 'downloads': 3657, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
248,microsoft/beit-base-patch16-224-pt22k-ft22k,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 47, 'downloads': 671685, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
249,facebook/mask2former-swin-small-coco-instance,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 5641, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
250,facebook/detr-resnet-101,https://arxiv.org/pdf/2005.12872.pdf,https://github.com/facebookresearch/detr/blob/main/README.md,"{'likes': 64, 'downloads': 55526, 'tags': ['transformers', 'pytorch', 'safetensors', 'detr', 'object-detection', 'dataset:coco', 'arxiv:2005.12872', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
251,microsoft/focalnet-base,https://arxiv.org/pdf/2203.11926.pdf,https://github.com/microsoft/FocalNet/blob/main/README.md,"{'likes': 0, 'downloads': 383, 'tags': ['transformers', 'pytorch', 'focalnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2203.11926', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
252,facebook/vit-msn-small,https://arxiv.org/pdf/2204.07141.pdf,https://github.com/facebookresearch/msn/blob/main/README.md,"{'likes': 1, 'downloads': 786, 'tags': ['transformers', 'pytorch', 'vit_msn', 'feature-extraction', 'dataset:imagenet-1k', 'arxiv:2204.07141', 'vision', 'license:apache-2.0', 'region:us', 'has_space'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
253,microsoft/trocr-large-stage1,https://arxiv.org/pdf/2109.10282.pdf,https://github.com/microsoft/unilm/blob/master/trocr/README.md,"{'likes': 9, 'downloads': 174597, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2109.10282', 'trocr', 'image-to-text', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
254,openai/whisper-large,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 349, 'downloads': 176858, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'zh', 'de', 'es', 'ru', 'ko', 'fr', 'ja', 'pt', 'tr', 'pl', 'ca', 'nl', 'ar', 'sv', 'it', 'id', 'hi', 'fi', 'vi', 'he', 'uk', 'el', 'ms', 'cs', 'ro', 'da', 'hu', 'ta', 'no', 'th', 'ur', 'hr', 'bg', 'lt', 'la', 'mi', 'ml', 'cy', 'sk', 'te', 'fa', 'lv', 'bn', 'sr', 'az', 'sl', 'kn', 'et', 'mk', 'br', 'eu', 'is', 'hy', 'ne', 'mn', 'bs', 'kk', 'sq', 'sw', 'gl', 'mr', 'pa', 'si', 'km', 'sn', 'yo', 'so', 'af', 'oc', 'ka', 'be', 'tg', 'sd', 'gu', 'am', 'yi', 'lo', 'uz', 'fo', 'ht', 'ps', 'tk', 'nn', 'mt', 'sa', 'lb', 'my', 'bo', 'tl', 'mg', 'as', 'tt', 'haw', 'ln', 'ha', 'ba', 'jw', 'su', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
255,google/long-t5-tglobal-large,https://arxiv.org/pdf/2112.07916.pdf,https://github.com/google-research/longt5/blob/master/README.md,"{'likes': 8, 'downloads': 1709, 'tags': ['transformers', 'pytorch', 'jax', 'safetensors', 'longt5', 'text2text-generation', 'en', 'arxiv:2112.07916', 'arxiv:1912.08777', 'arxiv:1910.10683', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
256,google/vit-large-patch16-384,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 7, 'downloads': 2606, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
257,openai/whisper-base,https://arxiv.org/pdf/2212.04356.pdf,https://github.com/openai/whisper/blob/main/README.md,"{'likes': 122, 'downloads': 55782, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'safetensors', 'whisper', 'automatic-speech-recognition', 'audio', 'hf-asr-leaderboard', 'en', 'zh', 'de', 'es', 'ru', 'ko', 'fr', 'ja', 'pt', 'tr', 'pl', 'ca', 'nl', 'ar', 'sv', 'it', 'id', 'hi', 'fi', 'vi', 'he', 'uk', 'el', 'ms', 'cs', 'ro', 'da', 'hu', 'ta', 'no', 'th', 'ur', 'hr', 'bg', 'lt', 'la', 'mi', 'ml', 'cy', 'sk', 'te', 'fa', 'lv', 'bn', 'sr', 'az', 'sl', 'kn', 'et', 'mk', 'br', 'eu', 'is', 'hy', 'ne', 'mn', 'bs', 'kk', 'sq', 'sw', 'gl', 'mr', 'pa', 'si', 'km', 'sn', 'yo', 'so', 'af', 'oc', 'ka', 'be', 'tg', 'sd', 'gu', 'am', 'yi', 'lo', 'uz', 'fo', 'ht', 'ps', 'tk', 'nn', 'mt', 'sa', 'lb', 'my', 'bo', 'tl', 'mg', 'as', 'tt', 'haw', 'ln', 'ha', 'ba', 'jw', 'su', 'arxiv:2212.04356', 'license:apache-2.0', 'model-index', 'endpoints_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'automatic-speech-recognition', 'class': 'classA'}"
258,Salesforce/blip2-opt-2.7b,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 140, 'downloads': 160435, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
259,google/vivit-b-16x2-kinetics400,https://arxiv.org/pdf/2103.15691.pdf,https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/README.md,"{'likes': 6, 'downloads': 6163, 'tags': ['transformers', 'pytorch', 'vivit', 'arxiv:2103.15691', 'vision', 'video-classification', 'license:mit', 'region:us', 'has_space'], 'pipeline_tag': 'video-classification', 'class': 'classA'}"
260,facebook/sam-vit-base,https://arxiv.org/pdf/2304.02643.pdf,https://github.com/facebookresearch/segment-anything/blob/main/README.md,"{'likes': 32, 'downloads': 59557, 'tags': ['transformers', 'pytorch', 'tf', 'sam', 'mask-generation', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
261,facebook/opt-13b,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 57, 'downloads': 25648, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
262,bert-large-cased-whole-word-masking-finetuned-squad,https://arxiv.org/pdf/1810.04805.pdf,https://github.com/google-research/bert/blob/master/README.md,"{'likes': 1, 'downloads': 28175, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'rust', 'safetensors', 'bert', 'question-answering', 'en', 'dataset:bookcorpus', 'dataset:wikipedia', 'arxiv:1810.04805', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'question-answering', 'class': 'classA'}"
263,google/flan-t5-small,https://arxiv.org/pdf/2210.11416.pdf,https://github.com/google-research/t5x/blob/main/README.md,"{'likes': 127, 'downloads': 268473, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 't5', 'text2text-generation', 'en', 'fr', 'ro', 'de', 'multilingual', 'dataset:svakulenk0/qrecc', 'dataset:taskmaster2', 'dataset:djaym7/wiki_dialog', 'dataset:deepmind/code_contests', 'dataset:lambada', 'dataset:gsm8k', 'dataset:aqua_rat', 'dataset:esnli', 'dataset:quasc', 'dataset:qed', 'arxiv:2210.11416', 'arxiv:1910.09700', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
264,google/bigbird-pegasus-large-pubmed,https://arxiv.org/pdf/2007.14062.pdf,https://github.com/google-research/bigbird/blob/master/README.md,"{'likes': 29, 'downloads': 1779, 'tags': ['transformers', 'pytorch', 'bigbird_pegasus', 'text2text-generation', 'en', 'dataset:scientific_papers', 'arxiv:2007.14062', 'summarization', 'license:apache-2.0', 'model-index', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'summarization', 'class': 'classA'}"
265,google/long-t5-tglobal-xl,https://arxiv.org/pdf/2112.07916.pdf,https://github.com/google-research/longt5/blob/master/README.md,"{'likes': 18, 'downloads': 808, 'tags': ['transformers', 'pytorch', 'jax', 'longt5', 'text2text-generation', 'en', 'arxiv:2112.07916', 'arxiv:1912.08777', 'arxiv:1910.10683', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
266,microsoft/beit-large-patch16-512,https://arxiv.org/pdf/2106.08254.pdf,https://github.com/microsoft/unilm/blob/master/beit/README.md,"{'likes': 7, 'downloads': 4960, 'tags': ['transformers', 'pytorch', 'jax', 'beit', 'image-classification', 'dataset:imagenet', 'dataset:imagenet-21k', 'arxiv:2106.08254', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
267,microsoft/swinv2-tiny-patch4-window8-256,https://arxiv.org/pdf/2111.09883.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 3, 'downloads': 3841, 'tags': ['transformers', 'pytorch', 'swinv2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2111.09883', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
268,google/mobilenet_v2_1.4_224,https://arxiv.org/pdf/1801.04381.pdf,https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md,"{'likes': 1, 'downloads': 407, 'tags': ['transformers', 'pytorch', 'mobilenet_v2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1801.04381', 'vision', 'license:other', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
269,hustvl/yolos-small,https://arxiv.org/pdf/2106.00666.pdf,https://github.com/hustvl/YOLOS/blob/main/README.md,"{'likes': 35, 'downloads': 63969, 'tags': ['transformers', 'pytorch', 'yolos', 'object-detection', 'dataset:coco', 'arxiv:2106.00666', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"
270,Salesforce/dialogstudio-t5-base-v1.0,https://arxiv.org/pdf/2307.10172.pdf,https://github.com/salesforce/DialogStudio/blob/main/README.md,"{'likes': 2, 'downloads': 405, 'tags': ['transformers', 'pytorch', 'safetensors', 't5', 'text2text-generation', 'en', 'fr', 'ro', 'de', 'multilingual', 'dataset:Salesforce/dialogstudio', 'dataset:flan', 'arxiv:2307.10172', 'arxiv:2210.11416', 'dialog', 'license:apache-2.0', 'autotrain_compatible', 'text-generation-inference', 'region:us', 'has_space'], 'pipeline_tag': 'text2text-generation', 'class': 'classA'}"
271,google/efficientnet-b7,https://arxiv.org/pdf/1905.11946.pdf,https://github.com/keras-team/keras/blob/master/README.md,"{'likes': 5, 'downloads': 36254, 'tags': ['transformers', 'pytorch', 'efficientnet', 'image-classification', 'dataset:imagenet-1k', 'arxiv:1905.11946', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
272,facebook/xlm-v-base,https://arxiv.org/pdf/2301.10472.pdf,https://github.com/stefan-it/xlm-v-experiments/blob/main/README.md,"{'likes': 31, 'downloads': 1363, 'tags': ['transformers', 'pytorch', 'xlm-roberta', 'fill-mask', 'multilingual', 'af', 'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sa', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'su', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'ug', 'uk', 'ur', 'uz', 'vi', 'xh', 'yi', 'zh', 'arxiv:2301.10472', 'license:mit', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
273,MIT/ast-finetuned-speech-commands-v2,https://arxiv.org/pdf/2104.01778.pdf,https://github.com/YuanGongND/ast/blob/master/README.md,"{'likes': 5, 'downloads': 2065, 'tags': ['transformers', 'pytorch', 'safetensors', 'audio-spectrogram-transformer', 'audio-classification', 'dataset:speech_commands', 'arxiv:2104.01778', 'license:bsd-3-clause', 'model-index', 'has_space', 'region:us'], 'pipeline_tag': 'audio-classification', 'class': 'classA'}"
274,facebook/convnextv2-tiny-22k-384,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 1, 'downloads': 83239, 'tags': ['transformers', 'pytorch', 'tf', 'convnextv2', 'image-classification', 'vision', 'dataset:imagenet-22k', 'arxiv:2301.00808', 'license:apache-2.0', 'autotrain_compatible', 'endpoints_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
275,naver-clova-ix/donut-base,https://arxiv.org/pdf/2111.15664.pdf,https://github.com/clovaai/donut/blob/master/README.md,"{'likes': 88, 'downloads': 20655, 'tags': ['transformers', 'pytorch', 'vision-encoder-decoder', 'arxiv:2111.15664', 'donut', 'image-to-text', 'vision', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
276,facebook/dinov2-base,https://arxiv.org/pdf/2304.07193.pdf,https://github.com/facebookresearch/dinov2/blob/main/README.md,"{'likes': 12, 'downloads': 49570, 'tags': ['transformers', 'pytorch', 'safetensors', 'dinov2', 'feature-extraction', 'arxiv:2304.07193', 'dino', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
277,Salesforce/blip2-flan-t5-xxl,https://arxiv.org/pdf/2301.12597.pdf,https://github.com/salesforce/LAVIS/blob/main/projects/blip2/README.md,"{'likes': 56, 'downloads': 30654, 'tags': ['transformers', 'pytorch', 'blip-2', 'visual-question-answering', 'en', 'arxiv:2301.12597', 'arxiv:2210.11416', 'vision', 'image-to-text', 'image-captioning', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'image-to-text', 'class': 'classA'}"
278,facebook/convnextv2-huge-22k-512,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 2, 'downloads': 378, 'tags': ['transformers', 'pytorch', 'convnextv2', 'image-classification', 'dataset:imagenet-22k', 'arxiv:2301.00808', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
279,facebook/sam-vit-huge,https://arxiv.org/pdf/2304.02643.pdf,https://github.com/facebookresearch/segment-anything/blob/main/README.md,"{'likes': 43, 'downloads': 27342, 'tags': ['transformers', 'pytorch', 'tf', 'sam', 'mask-generation', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
280,facebook/mask2former-swin-large-cityscapes-panoptic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 0, 'downloads': 561, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
281,glrh11/vit-base-patch16-224,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 0, 'downloads': 1995, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'image-classification', 'dataset:imagenet-1k', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
282,microsoft/speecht5_vc,https://arxiv.org/pdf/2110.07205.pdf,https://github.com/microsoft/SpeechT5/blob/main/README.md,"{'likes': 36, 'downloads': 16661, 'tags': ['transformers', 'pytorch', 'speecht5', 'dataset:cmu-arctic', 'arxiv:2110.07205', 'audio', 'audio-to-audio', 'license:mit', 'has_space', 'region:us'], 'pipeline_tag': 'audio-to-audio', 'class': 'classA'}"
283,microsoft/swinv2-large-patch4-window12to16-192to256-22kto1k-ft,https://arxiv.org/pdf/2111.09883.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 0, 'downloads': 12586, 'tags': ['transformers', 'pytorch', 'swinv2', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2111.09883', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
284,facebook/deit-base-patch16-224,https://arxiv.org/pdf/2012.12877.pdf,https://github.com/facebookresearch/deit/blob/main/README.md,"{'likes': 8, 'downloads': 6165, 'tags': ['transformers', 'pytorch', 'tf', 'vit', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2012.12877', 'arxiv:2006.03677', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
285,facebook/opt-1.3b,https://arxiv.org/pdf/2205.01068.pdf,https://github.com/facebookresearch/metaseq/blob/main/README.md,"{'likes': 114, 'downloads': 728826, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'opt', 'text-generation', 'en', 'arxiv:2205.01068', 'arxiv:2005.14165', 'license:other', 'has_space', 'text-generation-inference', 'region:us'], 'pipeline_tag': 'text-generation', 'class': 'classA'}"
286,facebook/mask2former-swin-tiny-coco-panoptic,https://arxiv.org/pdf/2112.01527.pdf,https://github.com/facebookresearch/Mask2Former/blob/main/README.md,"{'likes': 1, 'downloads': 768, 'tags': ['transformers', 'pytorch', 'safetensors', 'mask2former', 'dataset:coco', 'arxiv:2112.01527', 'arxiv:2107.06278', 'vision', 'image-segmentation', 'license:other', 'has_space', 'region:us'], 'pipeline_tag': 'image-segmentation', 'class': 'classA'}"
287,google/vit-base-patch16-224-in21k,https://arxiv.org/pdf/2010.11929.pdf,https://github.com/google-research/vision_transformer/blob/main/README.md,"{'likes': 64, 'downloads': 3385763, 'tags': ['transformers', 'pytorch', 'tf', 'jax', 'vit', 'feature-extraction', 'dataset:imagenet-21k', 'arxiv:2010.11929', 'arxiv:2006.03677', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'feature-extraction', 'class': 'classA'}"
288,facebook/convnextv2-base-22k-224,https://arxiv.org/pdf/2301.00808.pdf,https://github.com/facebookresearch/ConvNeXt-V2/blob/main/README.md,"{'likes': 2, 'downloads': 423, 'tags': ['transformers', 'pytorch', 'convnextv2', 'image-classification', 'dataset:imagenet-22k', 'arxiv:2301.00808', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
289,microsoft/swin-base-patch4-window7-224-in22k,https://arxiv.org/pdf/2103.14030.pdf,https://github.com/microsoft/Swin-Transformer/blob/main/README.md,"{'likes': 9, 'downloads': 3753, 'tags': ['transformers', 'pytorch', 'tf', 'safetensors', 'swin', 'image-classification', 'dataset:imagenet-21k', 'arxiv:2103.14030', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'has_space', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
290,wanglab/medsam-vit-base,https://arxiv.org/pdf/2304.02643.pdf,https://github.com/bowang-lab/MedSAM/blob/main/README.md,"{'likes': 8, 'downloads': 447, 'tags': ['transformers', 'pytorch', 'sam', 'mask-generation', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': None, 'class': 'classA'}"
291,microsoft/cvt-21-384-22k,https://arxiv.org/pdf/2103.15808.pdf,https://github.com/microsoft/CvT/blob/main/README.md,"{'likes': 1, 'downloads': 5984, 'tags': ['transformers', 'pytorch', 'tf', 'cvt', 'image-classification', 'dataset:imagenet-1k', 'arxiv:2103.15808', 'vision', 'license:apache-2.0', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'image-classification', 'class': 'classA'}"
292,facebook/xlm-roberta-xl,https://arxiv.org/pdf/2105.00572.pdf,https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md,"{'likes': 13, 'downloads': 3161, 'tags': ['transformers', 'pytorch', 'xlm-roberta-xl', 'fill-mask', 'multilingual', 'af', 'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'la', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sa', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'su', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'ug', 'uk', 'ur', 'uz', 'vi', 'xh', 'yi', 'zh', 'arxiv:2105.00572', 'license:mit', 'autotrain_compatible', 'region:us'], 'pipeline_tag': 'fill-mask', 'class': 'classA'}"
293,SenseTime/deformable-detr,https://arxiv.org/pdf/2010.04159.pdf,https://github.com/fundamentalvision/Deformable-DETR/blob/main/README.md,"{'likes': 10, 'downloads': 9424, 'tags': ['transformers', 'pytorch', 'safetensors', 'deformable_detr', 'object-detection', 'dataset:coco', 'arxiv:2010.04159', 'vision', 'license:apache-2.0', 'has_space', 'region:us'], 'pipeline_tag': 'object-detection', 'class': 'classA'}"