| Column | Dtype | Range / values |
| --- | --- | --- |
| modelId | stringlengths | 5 to 122 |
| author | stringlengths | 2 to 42 |
| last_modified | unknown | |
| downloads | int64 | 0 to 75.3M |
| likes | int64 | 0 to 10.6k |
| library_name | stringclasses | 189 values |
| tags | sequencelengths | 1 to 1.84k |
| pipeline_tag | stringclasses | 48 values |
| createdAt | unknown | |
| card | stringlengths | 1 to 901k |
TinyLlama/TinyLlama-1.1B-Chat-v1.0
TinyLlama
"2024-03-17T05:07:08Z"
579,780
894
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "en", "dataset:cerebras/SlimPajama-627B", "dataset:bigcode/starcoderdata", "dataset:HuggingFaceH4/ultrachat_200k", "dataset:HuggingFaceH4/ultrafeedback_binarized", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-12-30T06:27:30Z"
---
license: apache-2.0
datasets:
- cerebras/SlimPajama-627B
- bigcode/starcoderdata
- HuggingFaceH4/ultrachat_200k
- HuggingFaceH4/ultrafeedback_binarized
language:
- en
widget:
- example_title: Fibonacci (Python)
  messages:
  - role: system
    content: You are a chatbot who can help code!
  - role: user
    content: Write me a function to calculate the first 10 digits of the fibonacci sequence in Python and print it out to the CLI.
---

<div align="center">

# TinyLlama-1.1B
</div>

https://github.com/jzhang38/TinyLlama

The TinyLlama project aims to **pretrain** a **1.1B Llama model on 3 trillion tokens**. With some proper optimization, we can achieve this within a span of "just" 90 days using 16 A100-40G GPUs 🚀🚀. Training started on 2023-09-01.

We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama works as a plug-and-play replacement in many open-source projects built upon Llama. TinyLlama is also compact, with only 1.1B parameters. This compactness allows it to cater to a multitude of applications that demand a restricted computation and memory footprint.

#### This Model

This is the chat model finetuned on top of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T). **We follow [HF's Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)'s training recipe.** The model was initially fine-tuned on a variant of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. We then further aligned the model with [🤗 TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset, which contains 64k prompts and model completions that are ranked by GPT-4.

#### How to use

You will need `transformers>=4.34`. Check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information.

```python
# Install transformers from source - only needed for versions <= v4.34
# pip install git+https://github.com/huggingface/transformers.git
# pip install accelerate

import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
# <|system|>
# You are a friendly chatbot who always responds in the style of a pirate.</s>
# <|user|>
# How many helicopters can a human eat in one sitting?</s>
# <|assistant|>
# ...
```
facebook/nllb-200-distilled-600M
facebook
"2024-02-14T17:18:36Z"
576,319
354
transformers
[ "transformers", "pytorch", "m2m_100", "text2text-generation", "nllb", "translation", "ace", "acm", "acq", "aeb", "af", "ajp", "ak", "als", "am", "apc", "ar", "ars", "ary", "arz", "as", "ast", "awa", "ayr", "azb", "azj", "ba", "bm", "ban", "be", "bem", "bn", "bho", "bjn", "bo", "bs", "bug", "bg", "ca", "ceb", "cs", "cjk", "ckb", "crh", "cy", "da", "de", "dik", "dyu", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fj", "fi", "fon", "fr", "fur", "fuv", "gaz", "gd", "ga", "gl", "gn", "gu", "ht", "ha", "he", "hi", "hne", "hr", "hu", "hy", "ig", "ilo", "id", "is", "it", "jv", "ja", "kab", "kac", "kam", "kn", "ks", "ka", "kk", "kbp", "kea", "khk", "km", "ki", "rw", "ky", "kmb", "kmr", "knc", "kg", "ko", "lo", "lij", "li", "ln", "lt", "lmo", "ltg", "lb", "lua", "lg", "luo", "lus", "lvs", "mag", "mai", "ml", "mar", "min", "mk", "mt", "mni", "mos", "mi", "my", "nl", "nn", "nb", "npi", "nso", "nus", "ny", "oc", "ory", "pag", "pa", "pap", "pbt", "pes", "plt", "pl", "pt", "prs", "quy", "ro", "rn", "ru", "sg", "sa", "sat", "scn", "shn", "si", "sk", "sl", "sm", "sn", "sd", "so", "st", "es", "sc", "sr", "ss", "su", "sv", "swh", "szl", "ta", "taq", "tt", "te", "tg", "tl", "th", "ti", "tpi", "tn", "ts", "tk", "tum", "tr", "tw", "tzm", "ug", "uk", "umb", "ur", "uzn", "vec", "vi", "war", "wo", "xh", "ydd", "yo", "yue", "zh", "zsm", "zu", "dataset:flores-200", "license:cc-by-nc-4.0", "autotrain_compatible", "has_space", "region:us" ]
translation
"2022-07-08T09:43:57Z"
--- language: - ace - acm - acq - aeb - af - ajp - ak - als - am - apc - ar - ars - ary - arz - as - ast - awa - ayr - azb - azj - ba - bm - ban - be - bem - bn - bho - bjn - bo - bs - bug - bg - ca - ceb - cs - cjk - ckb - crh - cy - da - de - dik - dyu - dz - el - en - eo - et - eu - ee - fo - fj - fi - fon - fr - fur - fuv - gaz - gd - ga - gl - gn - gu - ht - ha - he - hi - hne - hr - hu - hy - ig - ilo - id - is - it - jv - ja - kab - kac - kam - kn - ks - ka - kk - kbp - kea - khk - km - ki - rw - ky - kmb - kmr - knc - kg - ko - lo - lij - li - ln - lt - lmo - ltg - lb - lua - lg - luo - lus - lvs - mag - mai - ml - mar - min - mk - mt - mni - mos - mi - my - nl - nn - nb - npi - nso - nus - ny - oc - ory - pag - pa - pap - pbt - pes - plt - pl - pt - prs - quy - ro - rn - ru - sg - sa - sat - scn - shn - si - sk - sl - sm - sn - sd - so - st - es - sc - sr - ss - su - sv - swh - szl - ta - taq - tt - te - tg - tl - th - ti - tpi - tn - ts - tk - tum - tr - tw - tzm - ug - uk - umb - ur - uzn - vec - vi - war - wo - xh - ydd - yo - yue - zh - zsm - zu language_details: "ace_Arab, ace_Latn, acm_Arab, acq_Arab, aeb_Arab, afr_Latn, ajp_Arab, aka_Latn, amh_Ethi, apc_Arab, arb_Arab, ars_Arab, ary_Arab, arz_Arab, asm_Beng, ast_Latn, awa_Deva, ayr_Latn, azb_Arab, azj_Latn, bak_Cyrl, bam_Latn, ban_Latn,bel_Cyrl, bem_Latn, ben_Beng, bho_Deva, bjn_Arab, bjn_Latn, bod_Tibt, bos_Latn, bug_Latn, bul_Cyrl, cat_Latn, ceb_Latn, ces_Latn, cjk_Latn, ckb_Arab, crh_Latn, cym_Latn, dan_Latn, deu_Latn, dik_Latn, dyu_Latn, dzo_Tibt, ell_Grek, eng_Latn, epo_Latn, est_Latn, eus_Latn, ewe_Latn, fao_Latn, pes_Arab, fij_Latn, fin_Latn, fon_Latn, fra_Latn, fur_Latn, fuv_Latn, gla_Latn, gle_Latn, glg_Latn, grn_Latn, guj_Gujr, hat_Latn, hau_Latn, heb_Hebr, hin_Deva, hne_Deva, hrv_Latn, hun_Latn, hye_Armn, ibo_Latn, ilo_Latn, ind_Latn, isl_Latn, ita_Latn, jav_Latn, jpn_Jpan, kab_Latn, kac_Latn, kam_Latn, kan_Knda, kas_Arab, kas_Deva, kat_Geor, knc_Arab, knc_Latn, kaz_Cyrl, kbp_Latn, kea_Latn, khm_Khmr, kik_Latn, kin_Latn, kir_Cyrl, kmb_Latn, kon_Latn, kor_Hang, kmr_Latn, lao_Laoo, lvs_Latn, lij_Latn, lim_Latn, lin_Latn, lit_Latn, lmo_Latn, ltg_Latn, ltz_Latn, lua_Latn, lug_Latn, luo_Latn, lus_Latn, mag_Deva, mai_Deva, mal_Mlym, mar_Deva, min_Latn, mkd_Cyrl, plt_Latn, mlt_Latn, mni_Beng, khk_Cyrl, mos_Latn, mri_Latn, zsm_Latn, mya_Mymr, nld_Latn, nno_Latn, nob_Latn, npi_Deva, nso_Latn, nus_Latn, nya_Latn, oci_Latn, gaz_Latn, ory_Orya, pag_Latn, pan_Guru, pap_Latn, pol_Latn, por_Latn, prs_Arab, pbt_Arab, quy_Latn, ron_Latn, run_Latn, rus_Cyrl, sag_Latn, san_Deva, sat_Beng, scn_Latn, shn_Mymr, sin_Sinh, slk_Latn, slv_Latn, smo_Latn, sna_Latn, snd_Arab, som_Latn, sot_Latn, spa_Latn, als_Latn, srd_Latn, srp_Cyrl, ssw_Latn, sun_Latn, swe_Latn, swh_Latn, szl_Latn, tam_Taml, tat_Cyrl, tel_Telu, tgk_Cyrl, tgl_Latn, tha_Thai, tir_Ethi, taq_Latn, taq_Tfng, tpi_Latn, tsn_Latn, tso_Latn, tuk_Latn, tum_Latn, tur_Latn, twi_Latn, tzm_Tfng, uig_Arab, ukr_Cyrl, umb_Latn, urd_Arab, uzn_Latn, vec_Latn, vie_Latn, war_Latn, wol_Latn, xho_Latn, ydd_Hebr, yor_Latn, yue_Hant, zho_Hans, zho_Hant, zul_Latn" pipeline_tag: translation tags: - nllb license: "cc-by-nc-4.0" datasets: - flores-200 metrics: - bleu - spbleu - chrf++ inference: false --- # NLLB-200 This is the model card of NLLB-200's distilled 600M variant. Here are the [metrics](https://tinyurl.com/nllb200densedst600mmetrics) for that particular checkpoint. - Information about training algorithms, parameters, fairness constraints or other applied approaches, and features. 
The exact training algorithm, data, and the strategies used to handle data imbalances for high- and low-resource languages when training NLLB-200 are described in the paper.
- Paper or other resource for more information: NLLB Team et al., "No Language Left Behind: Scaling Human-Centered Machine Translation", arXiv, 2022
- License: CC-BY-NC
- Where to send questions or comments about the model: https://github.com/facebookresearch/fairseq/issues

## Intended Use
- Primary intended uses: NLLB-200 is a machine translation model primarily intended for research in machine translation, especially for low-resource languages. It allows for single-sentence translation among 200 languages. Information on how to use the model can be found in the Fairseq code repository, along with the training code and references to evaluation and training data.
- Primary intended users: researchers and the machine translation research community.
- Out-of-scope use cases: NLLB-200 is a research model and is not released for production deployment. NLLB-200 is trained on general-domain text data and is not intended to be used with domain-specific texts, such as medical or legal documents. The model is not intended to be used for document translation. The model was trained with input lengths not exceeding 512 tokens, so translating longer sequences might result in quality degradation. NLLB-200 translations cannot be used as certified translations.

## Metrics
- Model performance measures: the NLLB-200 model was evaluated using the BLEU, spBLEU, and chrF++ metrics widely adopted by the machine translation community. Additionally, we performed human evaluation with the XSTS protocol and measured the toxicity of the generated translations.

## Evaluation Data
- Datasets: the Flores-200 dataset, described in Section 4 of the paper.
- Motivation: we used Flores-200 as it provides full evaluation coverage of the languages in NLLB-200.
- Preprocessing: sentence-split raw text data was preprocessed using SentencePiece. The SentencePiece model is released along with NLLB-200.

## Training Data
- We used parallel multilingual data from a variety of sources to train the model. We provide a detailed report on the data selection and construction process in Section 5 of the paper. We also used monolingual data constructed from Common Crawl; more details are provided in Section 5.2.

## Ethical Considerations
- In this work, we took a reflexive approach in technological development to ensure that we prioritize human users and minimize risks that could be transferred to them. While we reflect on our ethical considerations throughout the article, here are some additional points to highlight. For one, many languages chosen for this study are low-resource languages, with a heavy emphasis on African languages. While quality translation could improve education and information access in many of these communities, such access could also make groups with lower levels of digital literacy more vulnerable to misinformation or online scams. The latter scenarios could arise if bad actors misappropriate our work for nefarious activities, which we consider an example of unintended use. Regarding data acquisition, the training data used for model development were mined from various publicly available sources on the web. Although we invested heavily in data cleaning, personally identifiable information may not be entirely eliminated. Finally, although we did our best to optimize for translation quality, mistranslations produced by the model could remain. Although the odds are low, this could have an adverse impact on those who rely on these translations to make important decisions (particularly when related to health and safety).

## Caveats and Recommendations
- Our model has been tested on the Wikimedia domain, with limited investigation of other domains supported in NLLB-MD. In addition, the supported languages may have variations that our model is not capturing. Users should make appropriate assessments.

## Carbon Footprint Details
- The carbon dioxide (CO2e) estimate is reported in Section 8.8 of the paper.
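The card does not include a usage snippet. A minimal sketch with the 🤗 Transformers `translation` pipeline, assuming a recent `transformers` release with `sentencepiece` installed (the FLORES-200 language codes below are illustrative), could look like this:

```python
# Minimal sketch: English -> French with the distilled 600M checkpoint.
# Assumes `pip install transformers sentencepiece` and enough memory for the model.
from transformers import pipeline

translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    src_lang="eng_Latn",   # FLORES-200 code of the source language
    tgt_lang="fra_Latn",   # FLORES-200 code of the target language (illustrative choice)
)

print(translator("NLLB-200 allows for single-sentence translation among 200 languages.", max_length=128))
```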
akdeniz27/bert-base-turkish-cased-ner
akdeniz27
"2023-03-18T23:08:33Z"
573,232
12
transformers
[ "transformers", "pytorch", "onnx", "safetensors", "bert", "token-classification", "tr", "doi:10.57967/hf/0949", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:05Z"
---
language: tr
widget:
- text: "Mustafa Kemal Atatürk 19 Mayıs 1919'da Samsun'a çıktı."
---

# Turkish Named Entity Recognition (NER) Model

This model is a fine-tuned version of "dbmdz/bert-base-turkish-cased", trained on a reviewed version of the well-known Turkish NER dataset (https://github.com/stefan-it/turkish-bert/files/4558187/nerdata.txt).

# Fine-tuning parameters:

```
task = "ner"
model_checkpoint = "dbmdz/bert-base-turkish-cased"
batch_size = 8
label_list = ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']
max_length = 512
learning_rate = 2e-5
num_train_epochs = 3
weight_decay = 0.01
```

# How to use:

```
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

model = AutoModelForTokenClassification.from_pretrained("akdeniz27/bert-base-turkish-cased-ner")
tokenizer = AutoTokenizer.from_pretrained("akdeniz27/bert-base-turkish-cased-ner")
ner = pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy="first")
ner("your text here")
```

Please refer to https://huggingface.co/transformers/_modules/transformers/pipelines/token_classification.html for entity grouping with the aggregation_strategy parameter.

# Reference test results:

* accuracy: 0.9933935699477056
* f1: 0.9592969472710453
* precision: 0.9543530277931161
* recall: 0.9642923563325274

Evaluation results with the test sets proposed in the ["Küçük, D., Küçük, D., Arıcı, N. 2016. Türkçe Varlık İsmi Tanıma için bir Veri Kümesi ("A Named Entity Recognition Dataset for Turkish"). IEEE Sinyal İşleme, İletişim ve Uygulamaları Kurultayı. Zonguldak, Türkiye."](https://ieeexplore.ieee.org/document/7495744) paper:

| Test Set | Acc. | Prec. | Rec. | F1-Score |
|---|---|---|---|---|
| 20010000 | 0.9946 | 0.9871 | 0.9463 | 0.9662 |
| 20020000 | 0.9928 | 0.9134 | 0.9206 | 0.9170 |
| 20030000 | 0.9942 | 0.9814 | 0.9186 | 0.9489 |
| 20040000 | 0.9943 | 0.9660 | 0.9522 | 0.9590 |
| 20050000 | 0.9971 | 0.9539 | 0.9932 | 0.9732 |
| 20060000 | 0.9993 | 0.9942 | 0.9942 | 0.9942 |
| 20070000 | 0.9970 | 0.9806 | 0.9439 | 0.9619 |
| 20080000 | 0.9988 | 0.9821 | 0.9649 | 0.9735 |
| 20090000 | 0.9977 | 0.9891 | 0.9479 | 0.9681 |
| 20100000 | 0.9961 | 0.9684 | 0.9293 | 0.9485 |
| Overall | 0.9961 | 0.9720 | 0.9516 | 0.9617 |
huggyllama/llama-13b
huggyllama
"2023-04-07T15:50:53Z"
572,127
134
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "license:other", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-04-03T23:37:51Z"
---
license: other
---

This repository contains the weights for the LLaMA-13b model. This model is under a non-commercial license (see the LICENSE file). You should only use this repository if you have been granted access to the model by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form) but either lost your copy of the weights or ran into trouble converting them to the Transformers format.
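For those who already have access, a minimal loading sketch with 🤗 Transformers, assuming `transformers` and `accelerate` are installed and sufficient memory is available (the prompt is illustrative):

```python
# Minimal sketch: load the converted LLaMA-13b weights and generate a short completion.
# Assumes you have been granted access to the original weights per the license terms.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-13b")
model = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-13b",
    torch_dtype=torch.float16,  # the 13B model needs roughly 26 GB in fp16
    device_map="auto",          # requires accelerate; places layers on available devices
)

inputs = tokenizer("The theory of relativity states that", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```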
deepset/gelectra-base-germanquad
deepset
"2023-05-05T07:02:56Z"
571,888
23
transformers
[ "transformers", "pytorch", "tf", "safetensors", "electra", "question-answering", "exbert", "de", "dataset:deepset/germanquad", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
question-answering
"2022-03-02T23:29:05Z"
--- language: de datasets: - deepset/germanquad license: mit thumbnail: https://thumb.tildacdn.com/tild3433-3637-4830-a533-353833613061/-/resize/720x/-/format/webp/germanquad.jpg tags: - exbert --- ![bert_image](https://thumb.tildacdn.com/tild3433-3637-4830-a533-353833613061/-/resize/720x/-/format/webp/germanquad.jpg) ## Overview **Language model:** gelectra-base-germanquad **Language:** German **Training data:** GermanQuAD train set (~ 12MB) **Eval data:** GermanQuAD test set (~ 5MB) **Infrastructure**: 1x V100 GPU **Published**: Apr 21st, 2021 ## Details - We trained a German question answering model with a gelectra-base model as its basis. - The dataset is GermanQuAD, a new, German language dataset, which we hand-annotated and published [online](https://deepset.ai/germanquad). - The training dataset is one-way annotated and contains 11518 questions and 11518 answers, while the test dataset is three-way annotated so that there are 2204 questions and with 2204·3−76 = 6536answers, because we removed 76 wrong answers. See https://deepset.ai/germanquad for more details and dataset download in SQuAD format. ## Hyperparameters ``` batch_size = 24 n_epochs = 2 max_seq_len = 384 learning_rate = 3e-5 lr_schedule = LinearWarmup embeds_dropout_prob = 0.1 ``` ## Performance We evaluated the extractive question answering performance on our GermanQuAD test set. Model types and training data are included in the model name. For finetuning XLM-Roberta, we use the English SQuAD v2.0 dataset. The GELECTRA models are warm started on the German translation of SQuAD v1.1 and finetuned on [GermanQuAD](https://deepset.ai/germanquad). The human baseline was computed for the 3-way test set by taking one answer as prediction and the other two as ground truth. ![performancetable](https://images.prismic.io/deepset/1c63afd8-40e6-4fd9-85c4-0dbb81996183_german-qa-vs-xlm-r.png) ## Authors **Timo Möller:** timo.moeller@deepset.ai **Julian Risch:** julian.risch@deepset.ai **Malte Pietsch:** malte.pietsch@deepset.ai ## About us <div class="grid lg:grid-cols-2 gap-x-4 gap-y-3"> <div class="w-full h-40 object-cover mb-2 rounded-lg flex items-center justify-center"> <img alt="" src="https://raw.githubusercontent.com/deepset-ai/.github/main/deepset-logo-colored.png" class="w-40"/> </div> <div class="w-full h-40 object-cover mb-2 rounded-lg flex items-center justify-center"> <img alt="" src="https://raw.githubusercontent.com/deepset-ai/.github/main/haystack-logo-colored.png" class="w-40"/> </div> </div> [deepset](http://deepset.ai/) is the company behind the open-source NLP framework [Haystack](https://haystack.deepset.ai/) which is designed to help you build production ready NLP systems that use: Question answering, summarization, ranking etc. Some of our other work: - [Distilled roberta-base-squad2 (aka "tinyroberta-squad2")]([https://huggingface.co/deepset/tinyroberta-squad2) - [German BERT (aka "bert-base-german-cased")](https://deepset.ai/german-bert) - [GermanQuAD and GermanDPR datasets and models (aka "gelectra-base-germanquad", "gbert-base-germandpr")](https://deepset.ai/germanquad) ## Get in touch and join the Haystack community <p>For more info on Haystack, visit our <strong><a href="https://github.com/deepset-ai/haystack">GitHub</a></strong> repo and <strong><a href="https://docs.haystack.deepset.ai">Documentation</a></strong>. 
We also have a <strong><a class="h-7" href="https://haystack.deepset.ai/community">Discord community open to everyone!</a></strong></p> [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Discord](https://haystack.deepset.ai/community) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai) By the way: [we're hiring!](http://www.deepset.ai/jobs)
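A minimal extractive question-answering sketch with the 🤗 Transformers pipeline, assuming `transformers` is installed (the German question and context below are illustrative only):

```python
# Minimal sketch: extractive QA with the GermanQuAD-finetuned GELECTRA checkpoint.
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="deepset/gelectra-base-germanquad",
    tokenizer="deepset/gelectra-base-germanquad",
)

result = qa(
    question="Wann wurde GermanQuAD veröffentlicht?",
    context=(
        "GermanQuAD ist ein von deepset annotierter deutschsprachiger "
        "Frage-Antwort-Datensatz, der im April 2021 veröffentlicht wurde."
    ),
)
print(result)  # dict with 'score', 'start', 'end' and 'answer'
```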
Falconsai/nsfw_image_detection
Falconsai
"2023-12-06T17:18:38Z"
567,535
125
transformers
[ "transformers", "pytorch", "safetensors", "vit", "image-classification", "arxiv:2010.11929", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-classification
"2023-10-13T23:50:01Z"
--- license: apache-2.0 pipeline_tag: image-classification --- # Model Card: Fine-Tuned Vision Transformer (ViT) for NSFW Image Classification ## Model Description The **Fine-Tuned Vision Transformer (ViT)** is a variant of the transformer encoder architecture, similar to BERT, that has been adapted for image classification tasks. This specific model, named "google/vit-base-patch16-224-in21k," is pre-trained on a substantial collection of images in a supervised manner, leveraging the ImageNet-21k dataset. The images in the pre-training dataset are resized to a resolution of 224x224 pixels, making it suitable for a wide range of image recognition tasks. During the training phase, meticulous attention was given to hyperparameter settings to ensure optimal model performance. The model was fine-tuned with a judiciously chosen batch size of 16. This choice not only balanced computational efficiency but also allowed for the model to effectively process and learn from a diverse array of images. To facilitate this fine-tuning process, a learning rate of 5e-5 was employed. The learning rate serves as a critical tuning parameter that dictates the magnitude of adjustments made to the model's parameters during training. In this case, a learning rate of 5e-5 was selected to strike a harmonious balance between rapid convergence and steady optimization, resulting in a model that not only learns swiftly but also steadily refines its capabilities throughout the training process. This training phase was executed using a proprietary dataset containing an extensive collection of 80,000 images, each characterized by a substantial degree of variability. The dataset was thoughtfully curated to include two distinct classes, namely "normal" and "nsfw." This diversity allowed the model to grasp nuanced visual patterns, equipping it with the competence to accurately differentiate between safe and explicit content. The overarching objective of this meticulous training process was to impart the model with a deep understanding of visual cues, ensuring its robustness and competence in tackling the specific task of NSFW image classification. The result is a model that stands ready to contribute significantly to content safety and moderation, all while maintaining the highest standards of accuracy and reliability. ## Intended Uses & Limitations ### Intended Uses - **NSFW Image Classification**: The primary intended use of this model is for the classification of NSFW (Not Safe for Work) images. It has been fine-tuned for this purpose, making it suitable for filtering explicit or inappropriate content in various applications. 
### How to use

Here is how to use this model to classify an image into one of two classes (normal, nsfw):

```python
# Use a pipeline as a high-level helper
from PIL import Image
from transformers import pipeline

img = Image.open("<path_to_image_file>")
classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")
classifier(img)
```

<hr>

```python
# Load model directly
import torch
from PIL import Image
from transformers import AutoModelForImageClassification, ViTImageProcessor

img = Image.open("<path_to_image_file>")
model = AutoModelForImageClassification.from_pretrained("Falconsai/nsfw_image_detection")
processor = ViTImageProcessor.from_pretrained('Falconsai/nsfw_image_detection')
with torch.no_grad():
    inputs = processor(images=img, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits

predicted_label = logits.argmax(-1).item()
model.config.id2label[predicted_label]
```

<hr>

### Limitations
- **Specialized Task Fine-Tuning**: While the model is adept at NSFW image classification, its performance may vary when applied to other tasks. Users interested in employing this model for different tasks should explore fine-tuned versions available in the model hub for optimal results.

## Training Data

The model's training data includes a proprietary dataset comprising approximately 80,000 images. This dataset encompasses a significant amount of variability and consists of two distinct classes: "normal" and "nsfw." The training process on this data aimed to equip the model with the ability to distinguish between safe and explicit content effectively.

### Training Stats

```
- 'eval_loss': 0.07463177293539047,
- 'eval_accuracy': 0.980375,
- 'eval_runtime': 304.9846,
- 'eval_samples_per_second': 52.462,
- 'eval_steps_per_second': 3.279
```

<hr>

**Note:** It's essential to use this model responsibly and ethically, adhering to content guidelines and applicable regulations when implementing it in real-world applications, particularly those involving potentially sensitive content.

For more details on model fine-tuning and usage, please refer to the model's documentation and the model hub.

## References
- [Hugging Face Model Hub](https://huggingface.co/models)
- [Vision Transformer (ViT) Paper](https://arxiv.org/abs/2010.11929)
- [ImageNet-21k Dataset](http://www.image-net.org/)

**Disclaimer:** The model's performance may be influenced by the quality and representativeness of the data it was fine-tuned on. Users are encouraged to assess the model's suitability for their specific applications and datasets.
j-hartmann/emotion-english-distilroberta-base
j-hartmann
"2023-01-02T13:03:10Z"
563,253
284
transformers
[ "transformers", "pytorch", "tf", "roberta", "text-classification", "distilroberta", "sentiment", "emotion", "twitter", "reddit", "en", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: "en" tags: - distilroberta - sentiment - emotion - twitter - reddit widget: - text: "Oh wow. I didn't know that." - text: "This movie always makes me cry.." - text: "Oh Happy Day" --- # Emotion English DistilRoBERTa-base # Description ℹ With this model, you can classify emotions in English text data. The model was trained on 6 diverse datasets (see Appendix below) and predicts Ekman's 6 basic emotions, plus a neutral class: 1) anger 🤬 2) disgust 🤢 3) fear 😨 4) joy 😀 5) neutral 😐 6) sadness 😭 7) surprise 😲 The model is a fine-tuned checkpoint of [DistilRoBERTa-base](https://huggingface.co/distilroberta-base). For a 'non-distilled' emotion model, please refer to the model card of the [RoBERTa-large](https://huggingface.co/j-hartmann/emotion-english-roberta-large) version. # Application 🚀 a) Run emotion model with 3 lines of code on single text example using Hugging Face's pipeline command on Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/j-hartmann/emotion-english-distilroberta-base/blob/main/simple_emotion_pipeline.ipynb) ```python from transformers import pipeline classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True) classifier("I love this!") ``` ```python Output: [[{'label': 'anger', 'score': 0.004419783595949411}, {'label': 'disgust', 'score': 0.0016119900392368436}, {'label': 'fear', 'score': 0.0004138521908316761}, {'label': 'joy', 'score': 0.9771687984466553}, {'label': 'neutral', 'score': 0.005764586851000786}, {'label': 'sadness', 'score': 0.002092392183840275}, {'label': 'surprise', 'score': 0.008528684265911579}]] ``` b) Run emotion model on multiple examples and full datasets (e.g., .csv files) on Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/j-hartmann/emotion-english-distilroberta-base/blob/main/emotion_prediction_example.ipynb) # Contact 💻 Please reach out to [jochen.hartmann@tum.de](mailto:jochen.hartmann@tum.de) if you have any questions or feedback. Thanks to Samuel Domdey and [chrsiebert](https://huggingface.co/siebert) for their support in making this model available. # Reference ✅ For attribution, please cite the following reference if you use this model. A working paper will be available soon. ``` Jochen Hartmann, "Emotion English DistilRoBERTa-base". https://huggingface.co/j-hartmann/emotion-english-distilroberta-base/, 2022. ``` BibTex citation: ``` @misc{hartmann2022emotionenglish, author={Hartmann, Jochen}, title={Emotion English DistilRoBERTa-base}, year={2022}, howpublished = {\url{https://huggingface.co/j-hartmann/emotion-english-distilroberta-base/}}, } ``` # Appendix 📚 Please find an overview of the datasets used for training below. All datasets contain English text. The table summarizes which emotions are available in each of the datasets. The datasets represent a diverse collection of text types. Specifically, they contain emotion labels for texts from Twitter, Reddit, student self-reports, and utterances from TV dialogues. As MELD (Multimodal EmotionLines Dataset) extends the popular EmotionLines dataset, EmotionLines itself is not included here. |Name|anger|disgust|fear|joy|neutral|sadness|surprise| |---|---|---|---|---|---|---|---| |Crowdflower (2016)|Yes|-|-|Yes|Yes|Yes|Yes| |Emotion Dataset, Elvis et al. (2018)|Yes|-|Yes|Yes|-|Yes|Yes| |GoEmotions, Demszky et al. 
(2020)|Yes|Yes|Yes|Yes|Yes|Yes|Yes| |ISEAR, Vikash (2018)|Yes|Yes|Yes|Yes|-|Yes|-| |MELD, Poria et al. (2019)|Yes|Yes|Yes|Yes|Yes|Yes|Yes| |SemEval-2018, EI-reg, Mohammad et al. (2018) |Yes|-|Yes|Yes|-|Yes|-| The model is trained on a balanced subset from the datasets listed above (2,811 observations per emotion, i.e., nearly 20k observations in total). 80% of this balanced subset is used for training and 20% for evaluation. The evaluation accuracy is 66% (vs. the random-chance baseline of 1/7 = 14%). # Scientific Applications 📖 Below you can find a list of papers using "Emotion English DistilRoBERTa-base". If you would like your paper to be added to the list, please send me an email. - Butt, S., Sharma, S., Sharma, R., Sidorov, G., & Gelbukh, A. (2022). What goes on inside rumour and non-rumour tweets and their reactions: A Psycholinguistic Analyses. Computers in Human Behavior, 107345. - Kuang, Z., Zong, S., Zhang, J., Chen, J., & Liu, H. (2022). Music-to-Text Synaesthesia: Generating Descriptive Text from Music Recordings. arXiv preprint arXiv:2210.00434. - Rozado, D., Hughes, R., & Halberstadt, J. (2022). Longitudinal analysis of sentiment and emotion in news media headlines using automated labelling with Transformer language models. Plos one, 17(10), e0276367.
facebook/mms-tts-eng
facebook
"2023-09-06T13:32:25Z"
562,646
102
transformers
[ "transformers", "pytorch", "safetensors", "vits", "text-to-audio", "mms", "text-to-speech", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "has_space", "region:us" ]
text-to-speech
"2023-08-24T09:09:22Z"
--- license: cc-by-nc-4.0 tags: - mms - vits pipeline_tag: text-to-speech --- # Massively Multilingual Speech (MMS): English Text-to-Speech This repository contains the **English (eng)** language text-to-speech (TTS) model checkpoint. This model is part of Facebook's [Massively Multilingual Speech](https://arxiv.org/abs/2305.13516) project, aiming to provide speech technology across a diverse range of languages. You can find more details about the supported languages and their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html), and see all MMS-TTS checkpoints on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts). MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. ## Model Details VITS (**V**ariational **I**nference with adversarial learning for end-to-end **T**ext-to-**S**peech) is an end-to-end speech synthesis model that predicts a speech waveform conditional on an input text sequence. It is a conditional variational autoencoder (VAE) comprised of a posterior encoder, decoder, and conditional prior. A set of spectrogram-based acoustic features are predicted by the flow-based module, which is formed of a Transformer-based text encoder and multiple coupling layers. The spectrogram is decoded using a stack of transposed convolutional layers, much in the same style as the HiFi-GAN vocoder. Motivated by the one-to-many nature of the TTS problem, where the same text input can be spoken in multiple ways, the model also includes a stochastic duration predictor, which allows the model to synthesise speech with different rhythms from the same input text. The model is trained end-to-end with a combination of losses derived from variational lower bound and adversarial training. To improve the expressiveness of the model, normalizing flows are applied to the conditional prior distribution. During inference, the text encodings are up-sampled based on the duration prediction module, and then mapped into the waveform using a cascade of the flow module and HiFi-GAN decoder. Due to the stochastic nature of the duration predictor, the model is non-deterministic, and thus requires a fixed seed to generate the same speech waveform. For the MMS project, a separate VITS checkpoint is trained on each langauge. ## Usage MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. To use this checkpoint, first install the latest version of the library: ``` pip install --upgrade transformers accelerate ``` Then, run inference with the following code-snippet: ```python from transformers import VitsModel, AutoTokenizer import torch model = VitsModel.from_pretrained("facebook/mms-tts-eng") tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng") text = "some example text in the English language" inputs = tokenizer(text, return_tensors="pt") with torch.no_grad(): output = model(**inputs).waveform ``` The resulting waveform can be saved as a `.wav` file: ```python import scipy scipy.io.wavfile.write("techno.wav", rate=model.config.sampling_rate, data=output.float().numpy()) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(output.numpy(), rate=model.config.sampling_rate) ``` ## BibTex citation This model was developed by Vineel Pratap et al. from Meta AI. 
If you use the model, consider citing the MMS paper: ``` @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ``` ## License The model is licensed as **CC-BY-NC 4.0**.
maidalun1020/bce-embedding-base_v1
maidalun1020
"2024-04-09T09:56:59Z"
560,705
176
sentence-transformers
[ "sentence-transformers", "pytorch", "xlm-roberta", "feature-extraction", "sentence-similarity", "transformers", "en", "zh", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
feature-extraction
"2023-12-29T07:38:08Z"
--- license: apache-2.0 pipeline_tag: feature-extraction tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers language: - en - zh --- <!-- * @Description: * @Author: shenlei * @Date: 2023-12-19 10:31:41 * @LastEditTime: 2024-01-09 23:52:00 * @LastEditors: shenlei --> <h1 align="center">BCEmbedding: Bilingual and Crosslingual Embedding for RAG</h1> <p align="center"> <a href="https://github.com/netease-youdao/BCEmbedding/blob/master/LICENSE"> <img src="https://img.shields.io/badge/license-Apache--2.0-yellow"> </a> <a href="https://twitter.com/YDopensource"> <img src="https://img.shields.io/badge/follow-%40YDOpenSource-1DA1F2?logo=twitter&style={style}"> </a> </p> 最新、最详细的bce-embedding-base_v1相关信息,请移步(The latest "Updates" should be checked in): <p align="left"> <a href="https://github.com/netease-youdao/BCEmbedding">GitHub</a> </p> ## 主要特点(Key Features): - 中英双语,以及中英跨语种能力(Bilingual and Crosslingual capability in English and Chinese); - RAG优化,适配更多真实业务场景(RAG adaptation for more domains, including Education, Law, Finance, Medical, Literature, FAQ, Textbook, Wikipedia, etc.); - 方便集成进langchain和llamaindex(Easy integrations for langchain and llamaindex in <a href="https://github.com/netease-youdao/BCEmbedding">BCEmbedding</a>)。 - `EmbeddingModel`不需要“精心设计”instruction,尽可能召回有用片段。 (No need for "instruction") - **最佳实践(Best practice)** :embedding召回top50-100片段,reranker对这50-100片段精排,最后取top5-10片段。(1. Get top 50-100 passages with [bce-embedding-base_v1](https://huggingface.co/maidalun1020/bce-embedding-base_v1) for "`recall`"; 2. Rerank passages with [bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1) and get top 5-10 for "`precision`" finally. ) ## News: - `BCEmbedding`技术博客( **Technical Blog** ): [为RAG而生-BCEmbedding技术报告](https://zhuanlan.zhihu.com/p/681370855) - Related link for **RerankerModel** : [bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1) ## Third-party Examples: - RAG applications: [QAnything](https://github.com/netease-youdao/qanything), [HuixiangDou](https://github.com/InternLM/HuixiangDou), [ChatPDF](https://github.com/shibing624/ChatPDF). - Efficient inference framework: [ChatLLM.cpp](https://github.com/foldl/chatllm.cpp), [Xinference](https://github.com/xorbitsai/inference), [mindnlp (Huawei GPU, 华为GPU)](https://github.com/mindspore-lab/mindnlp/tree/master/llm/inference/bce). 
![image/jpeg](assets/rag_eval_multiple_domains_summary.jpg) ![image/jpeg](assets/Wechat.jpg) ----------------------------------------- <details open="open"> <summary>Click to Open Contents</summary> - <a href="#-bilingual-and-crosslingual-superiority" target="_Self">🌐 Bilingual and Crosslingual Superiority</a> - <a href="#-key-features" target="_Self">💡 Key Features</a> - <a href="#-latest-updates" target="_Self">🚀 Latest Updates</a> - <a href="#-model-list" target="_Self">🍎 Model List</a> - <a href="#-manual" target="_Self">📖 Manual</a> - <a href="#installation" target="_Self">Installation</a> - <a href="#quick-start" target="_Self">Quick Start (`transformers`, `sentence-transformers`)</a> - <a href="#integrations-for-rag-frameworks" target="_Self">Integrations for RAG Frameworks (`langchain`, `llama_index`)</a> - <a href="#%EF%B8%8F-evaluation" target="_Self">⚙️ Evaluation</a> - <a href="#evaluate-semantic-representation-by-mteb" target="_Self">Evaluate Semantic Representation by MTEB</a> - <a href="#evaluate-rag-by-llamaindex" target="_Self">Evaluate RAG by LlamaIndex</a> - <a href="#-leaderboard" target="_Self">📈 Leaderboard</a> - <a href="#semantic-representation-evaluations-in-mteb" target="_Self">Semantic Representation Evaluations in MTEB</a> - <a href="#rag-evaluations-in-llamaindex" target="_Self">RAG Evaluations in LlamaIndex</a> - <a href="#-youdaos-bcembedding-api" target="_Self">🛠 Youdao's BCEmbedding API</a> - <a href="#-wechat-group" target="_Self">🧲 WeChat Group</a> - <a href="#%EF%B8%8F-citation" target="_Self">✏️ Citation</a> - <a href="#-license" target="_Self">🔐 License</a> - <a href="#-related-links" target="_Self">🔗 Related Links</a> </details> <br> **B**ilingual and **C**rosslingual **Embedding** (`BCEmbedding`), developed by NetEase Youdao, encompasses `EmbeddingModel` and `RerankerModel`. The `EmbeddingModel` specializes in generating semantic vectors, playing a crucial role in semantic search and question-answering, and the `RerankerModel` excels at refining search results and ranking tasks. `BCEmbedding` serves as the cornerstone of Youdao's Retrieval Augmented Generation (RAG) implmentation, notably [QAnything](http://qanything.ai) [[github](https://github.com/netease-youdao/qanything)], an open-source implementation widely integrated in various Youdao products like [Youdao Speed Reading](https://read.youdao.com/#/home) and [Youdao Translation](https://fanyi.youdao.com/download-Mac?keyfrom=fanyiweb_navigation). Distinguished for its bilingual and crosslingual proficiency, `BCEmbedding` excels in bridging Chinese and English linguistic gaps, which achieves - **A high performence on <a href="#semantic-representation-evaluations-in-mteb">Semantic Representation Evaluations in MTEB</a>**; - **A new benchmark in the realm of <a href="#rag-evaluations-in-llamaindex">RAG Evaluations in LlamaIndex</a>**. 
`BCEmbedding`是由网易有道开发的双语和跨语种语义表征算法模型库,其中包含`EmbeddingModel`和`RerankerModel`两类基础模型。`EmbeddingModel`专门用于生成语义向量,在语义搜索和问答中起着关键作用,而`RerankerModel`擅长优化语义搜索结果和语义相关顺序精排。 `BCEmbedding`作为有道的检索增强生成式应用(RAG)的基石,特别是在[QAnything](http://qanything.ai) [[github](https://github.com/netease-youdao/qanything)]中发挥着重要作用。QAnything作为一个网易有道开源项目,在有道许多产品中有很好的应用实践,比如[有道速读](https://read.youdao.com/#/home)和[有道翻译](https://fanyi.youdao.com/download-Mac?keyfrom=fanyiweb_navigation) `BCEmbedding`以其出色的双语和跨语种能力而著称,在语义检索中消除中英语言之间的差异,从而实现: - **强大的双语和跨语种语义表征能力【<a href="#semantic-representation-evaluations-in-mteb">基于MTEB的语义表征评测指标</a>】。** - **基于LlamaIndex的RAG评测,表现SOTA【<a href="#rag-evaluations-in-llamaindex">基于LlamaIndex的RAG评测指标</a>】。** ## 🌐 Bilingual and Crosslingual Superiority Existing embedding models often encounter performance challenges in bilingual and crosslingual scenarios, particularly in Chinese, English and their crosslingual tasks. `BCEmbedding`, leveraging the strength of Youdao's translation engine, excels in delivering superior performance across monolingual, bilingual, and crosslingual settings. `EmbeddingModel` supports ***Chinese (ch) and English (en)*** (more languages support will come soon), while `RerankerModel` supports ***Chinese (ch), English (en), Japanese (ja) and Korean (ko)***. 现有的单个语义表征模型在双语和跨语种场景中常常表现不佳,特别是在中文、英文及其跨语种任务中。`BCEmbedding`充分利用有道翻译引擎的优势,实现只需一个模型就可以在单语、双语和跨语种场景中表现出卓越的性能。 `EmbeddingModel`支持***中文和英文***(之后会支持更多语种);`RerankerModel`支持***中文,英文,日文和韩文***。 ## 💡 Key Features - **Bilingual and Crosslingual Proficiency**: Powered by Youdao's translation engine, excelling in Chinese, English and their crosslingual retrieval task, with upcoming support for additional languages. - **RAG-Optimized**: Tailored for diverse RAG tasks including **translation, summarization, and question answering**, ensuring accurate **query understanding**. See <a href=#rag-evaluations-in-llamaindex>RAG Evaluations in LlamaIndex</a>. - **Efficient and Precise Retrieval**: Dual-encoder for efficient retrieval of `EmbeddingModel` in first stage, and cross-encoder of `RerankerModel` for enhanced precision and deeper semantic analysis in second stage. - **Broad Domain Adaptability**: Trained on diverse datasets for superior performance across various fields. - **User-Friendly Design**: Instruction-free, versatile use for multiple tasks without specifying query instruction for each task. - **Meaningful Reranking Scores**: `RerankerModel` provides relevant scores to improve result quality and optimize large language model performance. - **Proven in Production**: Successfully implemented and validated in Youdao's products. - **双语和跨语种能力**:基于有道翻译引擎的强大能力,我们的`BCEmbedding`具备强大的中英双语和跨语种语义表征能力。 - **RAG适配**:面向RAG做了针对性优化,可以适配大多数相关任务,比如**翻译,摘要,问答**等。此外,针对**问题理解**(query understanding)也做了针对优化,详见 <a href="#rag-evaluations-in-llamaindex">基于LlamaIndex的RAG评测指标</a>。 - **高效且精确的语义检索**:`EmbeddingModel`采用双编码器,可以在第一阶段实现高效的语义检索。`RerankerModel`采用交叉编码器,可以在第二阶段实现更高精度的语义顺序精排。 - **更好的领域泛化性**:为了在更多场景实现更好的效果,我们收集了多种多样的领域数据。 - **用户友好**:语义检索时不需要特殊指令前缀。也就是,你不需要为各种任务绞尽脑汁设计指令前缀。 - **有意义的重排序分数**:`RerankerModel`可以提供有意义的语义相关性分数(不仅仅是排序),可以用于过滤无意义文本片段,提高大模型生成效果。 - **产品化检验**:`BCEmbedding`已经被有道众多真实产品检验。 ## 🚀 Latest Updates - ***2024-01-03***: **Model Releases** - [bce-embedding-base_v1](https://huggingface.co/maidalun1020/bce-embedding-base_v1) and [bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1) are available. 
- ***2024-01-03***: **Eval Datasets** [[CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset)] - Evaluate the performence of RAG, using [LlamaIndex](https://github.com/run-llama/llama_index). - ***2024-01-03***: **Eval Datasets** [[Details](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py)] - Evaluate the performence of crosslingual semantic representation, using [MTEB](https://github.com/embeddings-benchmark/mteb). - ***2024-01-03***: **模型发布** - [bce-embedding-base_v1](https://huggingface.co/maidalun1020/bce-embedding-base_v1)和[bce-reranker-base_v1](https://huggingface.co/maidalun1020/bce-reranker-base_v1)已发布. - ***2024-01-03***: **RAG评测数据** [[CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset)] - 基于[LlamaIndex](https://github.com/run-llama/llama_index)的RAG评测数据已发布。 - ***2024-01-03***: **跨语种语义表征评测数据** [[详情](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py)] - 基于[MTEB](https://github.com/embeddings-benchmark/mteb)的跨语种评测数据已发布. ## 🍎 Model List | Model Name | Model Type | Languages | Parameters | Weights | |:-------------------------------|:--------:|:--------:|:--------:|:--------:| | bce-embedding-base_v1 | `EmbeddingModel` | ch, en | 279M | [download](https://huggingface.co/maidalun1020/bce-embedding-base_v1) | | bce-reranker-base_v1 | `RerankerModel` | ch, en, ja, ko | 279M | [download](https://huggingface.co/maidalun1020/bce-reranker-base_v1) | ## 📖 Manual ### Installation First, create a conda environment and activate it. ```bash conda create --name bce python=3.10 -y conda activate bce ``` Then install `BCEmbedding` for minimal installation: ```bash pip install BCEmbedding==0.1.1 ``` Or install from source: ```bash git clone git@github.com:netease-youdao/BCEmbedding.git cd BCEmbedding pip install -v -e . ``` ### Quick Start #### 1. Based on `BCEmbedding` Use `EmbeddingModel`, and `cls` [pooler](./BCEmbedding/models/embedding.py#L24) is default. ```python from BCEmbedding import EmbeddingModel # list of sentences sentences = ['sentence_0', 'sentence_1', ...] # init embedding model model = EmbeddingModel(model_name_or_path="maidalun1020/bce-embedding-base_v1") # extract embeddings embeddings = model.encode(sentences) ``` Use `RerankerModel` to calculate relevant scores and rerank: ```python from BCEmbedding import RerankerModel # your query and corresponding passages query = 'input_query' passages = ['passage_0', 'passage_1', ...] # construct sentence pairs sentence_pairs = [[query, passage] for passage in passages] # init reranker model model = RerankerModel(model_name_or_path="maidalun1020/bce-reranker-base_v1") # method 0: calculate scores of sentence pairs scores = model.compute_score(sentence_pairs) # method 1: rerank passages rerank_results = model.rerank(query, passages) ``` NOTE: - In [`RerankerModel.rerank`](./BCEmbedding/models/reranker.py#L137) method, we provide an advanced preproccess that we use in production for making `sentence_pairs`, when "passages" are very long. #### 2. Based on `transformers` For `EmbeddingModel`: ```python from transformers import AutoModel, AutoTokenizer # list of sentences sentences = ['sentence_0', 'sentence_1', ...] 
# init model and tokenizer tokenizer = AutoTokenizer.from_pretrained('maidalun1020/bce-embedding-base_v1') model = AutoModel.from_pretrained('maidalun1020/bce-embedding-base_v1') device = 'cuda' # if no GPU, set "cpu" model.to(device) # get inputs inputs = tokenizer(sentences, padding=True, truncation=True, max_length=512, return_tensors="pt") inputs_on_device = {k: v.to(self.device) for k, v in inputs.items()} # get embeddings outputs = model(**inputs_on_device, return_dict=True) embeddings = outputs.last_hidden_state[:, 0] # cls pooler embeddings = embeddings / embeddings.norm(dim=1, keepdim=True) # normalize ``` For `RerankerModel`: ```python import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification # init model and tokenizer tokenizer = AutoTokenizer.from_pretrained('maidalun1020/bce-reranker-base_v1') model = AutoModelForSequenceClassification.from_pretrained('maidalun1020/bce-reranker-base_v1') device = 'cuda' # if no GPU, set "cpu" model.to(device) # get inputs inputs = tokenizer(sentence_pairs, padding=True, truncation=True, max_length=512, return_tensors="pt") inputs_on_device = {k: v.to(device) for k, v in inputs.items()} # calculate scores scores = model(**inputs_on_device, return_dict=True).logits.view(-1,).float() scores = torch.sigmoid(scores) ``` #### 3. Based on `sentence_transformers` For `EmbeddingModel`: ```python from sentence_transformers import SentenceTransformer # list of sentences sentences = ['sentence_0', 'sentence_1', ...] # init embedding model ## New update for sentence-trnasformers. So clean up your "`SENTENCE_TRANSFORMERS_HOME`/maidalun1020_bce-embedding-base_v1" or "~/.cache/torch/sentence_transformers/maidalun1020_bce-embedding-base_v1" first for downloading new version. model = SentenceTransformer("maidalun1020/bce-embedding-base_v1") # extract embeddings embeddings = model.encode(sentences, normalize_embeddings=True) ``` For `RerankerModel`: ```python from sentence_transformers import CrossEncoder # init reranker model model = CrossEncoder('maidalun1020/bce-reranker-base_v1', max_length=512) # calculate scores of sentence pairs scores = model.predict(sentence_pairs) ``` ### Integrations for RAG Frameworks #### 1. Used in `langchain` ```python from langchain.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import FAISS from langchain_community.vectorstores.utils import DistanceStrategy query = 'apples' passages = [ 'I like apples', 'I like oranges', 'Apples and oranges are fruits' ] # init embedding model model_name = 'maidalun1020/bce-embedding-base_v1' model_kwargs = {'device': 'cuda'} encode_kwargs = {'batch_size': 64, 'normalize_embeddings': True, 'show_progress_bar': False} embed_model = HuggingFaceEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) # example #1. extract embeddings query_embedding = embed_model.embed_query(query) passages_embeddings = embed_model.embed_documents(passages) # example #2. langchain retriever example faiss_vectorstore = FAISS.from_texts(passages, embed_model, distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT) retriever = faiss_vectorstore.as_retriever(search_type="similarity", search_kwargs={"score_threshold": 0.5, "k": 3}) related_passages = retriever.get_relevant_documents(query) ``` #### 2. 
Used in `llama_index` ```python from llama_index.embeddings import HuggingFaceEmbedding from llama_index import VectorStoreIndex, ServiceContext, SimpleDirectoryReader from llama_index.node_parser import SimpleNodeParser from llama_index.llms import OpenAI query = 'apples' passages = [ 'I like apples', 'I like oranges', 'Apples and oranges are fruits' ] # init embedding model model_args = {'model_name': 'maidalun1020/bce-embedding-base_v1', 'max_length': 512, 'embed_batch_size': 64, 'device': 'cuda'} embed_model = HuggingFaceEmbedding(**model_args) # example #1. extract embeddings query_embedding = embed_model.get_query_embedding(query) passages_embeddings = embed_model.get_text_embedding_batch(passages) # example #2. rag example llm = OpenAI(model='gpt-3.5-turbo-0613', api_key=os.environ.get('OPENAI_API_KEY'), api_base=os.environ.get('OPENAI_BASE_URL')) service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model) documents = SimpleDirectoryReader(input_files=["BCEmbedding/tools/eval_rag/eval_pdfs/Comp_en_llama2.pdf"]).load_data() node_parser = SimpleNodeParser.from_defaults(chunk_size=512) nodes = node_parser.get_nodes_from_documents(documents[0:36]) index = VectorStoreIndex(nodes, service_context=service_context) query_engine = index.as_query_engine() response = query_engine.query("What is llama?") ``` ## ⚙️ Evaluation ### Evaluate Semantic Representation by MTEB We provide evaluateion tools for `embedding` and `reranker` models, based on [MTEB](https://github.com/embeddings-benchmark/mteb) and [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB). 我们基于[MTEB](https://github.com/embeddings-benchmark/mteb)和[C_MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB),提供`embedding`和`reranker`模型的语义表征评测工具。 #### 1. Embedding Models Just run following cmd to evaluate `your_embedding_model` (e.g. `maidalun1020/bce-embedding-base_v1`) in **bilingual and crosslingual settings** (e.g. `["en", "zh", "en-zh", "zh-en"]`). 运行下面命令评测`your_embedding_model`(比如,`maidalun1020/bce-embedding-base_v1`)。评测任务将会在**双语和跨语种**(比如,`["en", "zh", "en-zh", "zh-en"]`)模式下评测: ```bash python BCEmbedding/tools/eval_mteb/eval_embedding_mteb.py --model_name_or_path maidalun1020/bce-embedding-base_v1 --pooler cls ``` The total evaluation tasks contain ***114 datastes*** of **"Retrieval", "STS", "PairClassification", "Classification", "Reranking" and "Clustering"**. 评测包含 **"Retrieval", "STS", "PairClassification", "Classification", "Reranking"和"Clustering"** 这六大类任务的 ***114个数据集***。 ***NOTE:*** - **All models are evaluated in their recommended pooling method (`pooler`)**. - `mean` pooler: "jina-embeddings-v2-base-en", "m3e-base", "m3e-large", "e5-large-v2", "multilingual-e5-base", "multilingual-e5-large" and "gte-large". - `cls` pooler: Other models. - "jina-embeddings-v2-base-en" model should be loaded with `trust_remote_code`. ```bash python BCEmbedding/tools/eval_mteb/eval_embedding_mteb.py --model_name_or_path {moka-ai/m3e-base | moka-ai/m3e-large} --pooler mean python BCEmbedding/tools/eval_mteb/eval_embedding_mteb.py --model_name_or_path jinaai/jina-embeddings-v2-base-en --pooler mean --trust_remote_code ``` ***注意:*** - 所有模型的评测采用各自推荐的`pooler`。"jina-embeddings-v2-base-en", "m3e-base", "m3e-large", "e5-large-v2", "multilingual-e5-base", "multilingual-e5-large"和"gte-large"的 `pooler`采用`mean`,其他模型的`pooler`采用`cls`. - "jina-embeddings-v2-base-en"模型在载入时需要`trust_remote_code`。 #### 2. Reranker Models Run following cmd to evaluate `your_reranker_model` (e.g. 
"maidalun1020/bce-reranker-base_v1") in **bilingual and crosslingual settings** (e.g. `["en", "zh", "en-zh", "zh-en"]`). 运行下面命令评测`your_reranker_model`(比如,`maidalun1020/bce-reranker-base_v1`)。评测任务将会在 **双语种和跨语种**(比如,`["en", "zh", "en-zh", "zh-en"]`)模式下评测: ```bash python BCEmbedding/tools/eval_mteb/eval_reranker_mteb.py --model_name_or_path maidalun1020/bce-reranker-base_v1 ``` The evaluation tasks contain ***12 datastes*** of **"Reranking"**. 评测包含 **"Reranking"** 任务的 ***12个数据集***。 #### 3. Metrics Visualization Tool We proveide a one-click script to sumarize evaluation results of `embedding` and `reranker` models as [Embedding Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md) and [Reranker Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md). 我们提供了`embedding`和`reranker`模型的指标可视化一键脚本,输出一个markdown文件,详见[Embedding模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md)和[Reranker模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md)。 ```bash python BCEmbedding/evaluation/mteb/summarize_eval_results.py --results_dir {your_embedding_results_dir | your_reranker_results_dir} ``` ### Evaluate RAG by LlamaIndex [LlamaIndex](https://github.com/run-llama/llama_index) is a famous data framework for LLM-based applications, particularly in RAG. Recently, the [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83) has evaluated the popular embedding and reranker models in RAG pipeline and attract great attention. Now, we follow its pipeline to evaluate our `BCEmbedding`. [LlamaIndex](https://github.com/run-llama/llama_index)是一个著名的大模型应用的开源工具,在RAG中很受欢迎。最近,[LlamaIndex博客](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)对市面上常用的embedding和reranker模型进行RAG流程的评测,吸引广泛关注。下面我们按照该评测流程验证`BCEmbedding`在RAG中的效果。 First, install LlamaIndex: ```bash pip install llama-index==0.9.22 ``` #### 1. Metrics Definition - Hit Rate: Hit rate calculates the fraction of queries where the correct answer is found within the top-k retrieved documents. In simpler terms, it's about how often our system gets it right within the top few guesses. ***The larger, the better.*** - Mean Reciprocal Rank (MRR): For each query, MRR evaluates the system's accuracy by looking at the rank of the highest-placed relevant document. Specifically, it's the average of the reciprocals of these ranks across all the queries. So, if the first relevant document is the top result, the reciprocal rank is 1; if it's second, the reciprocal rank is 1/2, and so on. ***The larger, the better.*** - 命中率(Hit Rate) 命中率计算的是在检索的前k个文档中找到正确答案的查询所占的比例。简单来说,它反映了我们的系统在前几次猜测中答对的频率。***该指标越大越好。*** - 平均倒数排名(Mean Reciprocal Rank,MRR) 对于每个查询,MRR通过查看最高排名的相关文档的排名来评估系统的准确性。具体来说,它是在所有查询中这些排名的倒数的平均值。因此,如果第一个相关文档是排名最靠前的结果,倒数排名就是1;如果是第二个,倒数排名就是1/2,依此类推。***该指标越大越好。*** #### 2. Reproduce [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83) In order to compare our `BCEmbedding` with other embedding and reranker models fairly, we provide a one-click script to reproduce results of the LlamaIndex Blog, including our `BCEmbedding`: 为了公平起见,运行下面脚本,复现LlamaIndex博客的结果,将`BCEmbedding`与其他embedding和reranker模型进行对比分析: ```bash # There should be two GPUs available at least. 
CUDA_VISIBLE_DEVICES=0,1 python BCEmbedding/tools/eval_rag/eval_llamaindex_reproduce.py ``` Then, sumarize the evaluation results by: ```bash python BCEmbedding/tools/eval_rag/summarize_eval_results.py --results_dir results/rag_reproduce_results ``` Results Reproduced from the LlamaIndex Blog can be checked in ***[Reproduced Summary of RAG Evaluation](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/rag_eval_reproduced_summary.md)***, with some obvious ***conclusions***: - In `WithoutReranker` setting, our `bce-embedding-base_v1` outperforms all the other embedding models. - With fixing the embedding model, our `bce-reranker-base_v1` achieves the best performence. - ***The combination of `bce-embedding-base_v1` and `bce-reranker-base_v1` is SOTA.*** 输出的指标汇总详见 ***[LlamaIndex RAG评测结果复现](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/rag_eval_reproduced_summary.md)***。从该复现结果中,可以看出: - 在`WithoutReranker`设置下(**竖排对比**),`bce-embedding-base_v1`比其他embedding模型效果都要好。 - 在固定embedding模型设置下,对比不同reranker效果(**横排对比**),`bce-reranker-base_v1`比其他reranker模型效果都要好。 - ***`bce-embedding-base_v1`和`bce-reranker-base_v1`组合,表现SOTA。*** #### 3. Broad Domain Adaptability The evaluation of [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83) is **monolingual, small amount of data, and specific domain** (just including "llama2" paper). In order to evaluate the **broad domain adaptability, bilingual and crosslingual capability**, we follow the blog to build a multiple domains evaluation dataset (includding "Computer Science", "Physics", "Biology", "Economics", "Math", and "Quantitative Finance"), named [CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset), **by OpenAI `gpt-4-1106-preview` for high quality**. 在上述的[LlamaIndex博客](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)的评测数据只用了“llama2”这一篇文章,该评测是 **单语种,小数据量,特定领域** 的。为了兼容更真实更广的用户使用场景,评测算法模型的 **领域泛化性,双语和跨语种能力**,我们按照该博客的方法构建了一个多领域(计算机科学,物理学,生物学,经济学,数学,量化金融等)的双语种、跨语种评测数据,[CrosslingualMultiDomainsDataset](https://huggingface.co/datasets/maidalun1020/CrosslingualMultiDomainsDataset)。**为了保证构建数据的高质量,我们采用OpenAI的`gpt-4-1106-preview`。** First, run following cmd to evaluate the most popular and powerful embedding and reranker models: ```bash # There should be two GPUs available at least. CUDA_VISIBLE_DEVICES=0,1 python BCEmbedding/tools/eval_rag/eval_llamaindex_multiple_domains.py ``` Then, run the following script to sumarize the evaluation results: ```bash python BCEmbedding/tools/eval_rag/summarize_eval_results.py --results_dir results/rag_results ``` The summary of multiple domains evaluations can be seen in <a href=#1-multiple-domains-scenarios>Multiple Domains Scenarios</a>. ## 📈 Leaderboard ### Semantic Representation Evaluations in MTEB #### 1. 
Embedding Models | Model | Dimensions | Pooler | Instructions | Retrieval (47) | STS (19) | PairClassification (5) | Classification (21) | Reranking (12) | Clustering (15) | ***AVG*** (119) | |:--------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | bge-base-en-v1.5 | 768 | `cls` | Need | 37.14 | 55.06 | 75.45 | 59.73 | 43.00 | 37.74 | 47.19 | | bge-base-zh-v1.5 | 768 | `cls` | Need | 47.63 | 63.72 | 77.40 | 63.38 | 54.95 | 32.56 | 53.62 | | bge-large-en-v1.5 | 1024 | `cls` | Need | 37.18 | 54.09 | 75.00 | 59.24 | 42.47 | 37.32 | 46.80 | | bge-large-zh-v1.5 | 1024 | `cls` | Need | 47.58 | 64.73 | 79.14 | 64.19 | 55.98 | 33.26 | 54.23 | | e5-large-v2 | 1024 | `mean` | Need | 35.98 | 55.23 | 75.28 | 59.53 | 42.12 | 36.51 | 46.52 | | gte-large | 1024 | `mean` | Free | 36.68 | 55.22 | 74.29 | 57.73 | 42.44 | 38.51 | 46.67 | | gte-large-zh | 1024 | `cls` | Free | 41.15 | 64.62 | 77.58 | 62.04 | 55.62 | 33.03 | 51.51 | | jina-embeddings-v2-base-en | 768 | `mean` | Free | 31.58 | 54.28 | 74.84 | 58.42 | 41.16 | 34.67 | 44.29 | | m3e-base | 768 | `mean` | Free | 46.29 | 63.93 | 71.84 | 64.08 | 52.38 | 37.84 | 53.54 | | m3e-large | 1024 | `mean` | Free | 34.85 | 59.74 | 67.69 | 60.07 | 48.99 | 31.62 | 46.78 | | multilingual-e5-base | 768 | `mean` | Need | 54.73 | 65.49 | 76.97 | 69.72 | 55.01 | 38.44 | 58.34 | | multilingual-e5-large | 1024 | `mean` | Need | 56.76 | 66.79 | 78.80 | 71.61 | 56.49 | 43.09 | 60.50 | | ***bce-embedding-base_v1*** | 768 | `cls` | Free | 57.60 | 65.73 | 74.96 | 69.00 | 57.29 | 38.95 | 59.43 | ***NOTE:*** - Our ***bce-embedding-base_v1*** outperforms other opensource embedding models with comparable model size. - ***114 datastes*** of **"Retrieval", "STS", "PairClassification", "Classification", "Reranking" and "Clustering"** in `["en", "zh", "en-zh", "zh-en"]` setting. - The [crosslingual evaluation datasets](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py) we released belong to `Retrieval` task. - More evaluation details please check [Embedding Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md). ***要点:*** - 对比其他开源的相同规模的embedding模型,***bce-embedding-base_v1*** 表现最好,效果比最好的large模型稍差。 - 评测包含 **"Retrieval", "STS", "PairClassification", "Classification", "Reranking"和"Clustering"** 这六大类任务的共 ***114个数据集***。 - 我们开源的[跨语种语义表征评测数据](https://github.com/netease-youdao/BCEmbedding/blob/master/BCEmbedding/evaluation/c_mteb/Retrieval.py)属于`Retrieval`任务。 - 更详细的评测结果详见[Embedding模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/embedding_eval_summary.md)。 #### 2. Reranker Models | Model | Reranking (12) | ***AVG*** (12) | | :--------------------------------- | :-------------: | :--------------------: | | bge-reranker-base | 59.04 | 59.04 | | bge-reranker-large | 60.86 | 60.86 | | ***bce-reranker-base_v1*** | **61.29** | ***61.29*** | ***NOTE:*** - Our ***bce-reranker-base_v1*** outperforms other opensource reranker models. - ***12 datastes*** of **"Reranking"** in `["en", "zh", "en-zh", "zh-en"]` setting. - More evaluation details please check [Reranker Models Evaluation Summary](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md). 
***要点:*** - ***bce-reranker-base_v1*** 优于其他开源reranker模型。 - 评测包含 **"Reranking"** 任务的 ***12个数据集***。 - 更详细的评测结果详见[Reranker模型指标汇总](https://github.com/netease-youdao/BCEmbedding/blob/master/Docs/EvaluationSummary/reranker_eval_summary.md) ### RAG Evaluations in LlamaIndex #### 1. Multiple Domains Scenarios ![image/jpeg](assets/rag_eval_multiple_domains_summary.jpg) ***NOTE:*** - Evaluated in **`["en", "zh", "en-zh", "zh-en"]` setting**. - In `WithoutReranker` setting, our `bce-embedding-base_v1` outperforms all the other embedding models. - With fixing the embedding model, our `bce-reranker-base_v1` achieves the best performence. - **The combination of `bce-embedding-base_v1` and `bce-reranker-base_v1` is SOTA**. ***要点:*** - 评测是在`["en", "zh", "en-zh", "zh-en"]`设置下。 - 在`WithoutReranker`设置下(**竖排对比**),`bce-embedding-base_v1`优于其他Embedding模型,包括开源和闭源。 - 在固定Embedding模型设置下,对比不同reranker效果(**横排对比**),`bce-reranker-base_v1`比其他reranker模型效果都要好,包括开源和闭源。 - ***`bce-embedding-base_v1`和`bce-reranker-base_v1`组合,表现SOTA。*** ## 🛠 Youdao's BCEmbedding API For users who prefer a hassle-free experience without the need to download and configure the model on their own systems, `BCEmbedding` is readily accessible through Youdao's API. This option offers a streamlined and efficient way to integrate BCEmbedding into your projects, bypassing the complexities of manual setup and maintenance. Detailed instructions and comprehensive API documentation are available at [Youdao BCEmbedding API](https://ai.youdao.com/DOCSIRMA/html/aigc/api/embedding/index.html). Here, you'll find all the necessary guidance to easily implement `BCEmbedding` across a variety of use cases, ensuring a smooth and effective integration for optimal results. 对于那些更喜欢直接调用api的用户,有道提供方便的`BCEmbedding`调用api。该方式是一种简化和高效的方式,将`BCEmbedding`集成到您的项目中,避开了手动设置和系统维护的复杂性。更详细的api调用接口说明详见[有道BCEmbedding API](https://ai.youdao.com/DOCSIRMA/html/aigc/api/embedding/index.html)。 ## 🧲 WeChat Group Welcome to scan the QR code below and join the WeChat group. 欢迎大家扫码加入官方微信交流群。 ![image/jpeg](assets/Wechat.jpg) ## ✏️ Citation If you use `BCEmbedding` in your research or project, please feel free to cite and star it: 如果在您的研究或任何项目中使用本工作,烦请按照下方进行引用,并打个小星星~ ``` @misc{youdao_bcembedding_2023, title={BCEmbedding: Bilingual and Crosslingual Embedding for RAG}, author={NetEase Youdao, Inc.}, year={2023}, howpublished={\url{https://github.com/netease-youdao/BCEmbedding}} } ``` ## 🔐 License `BCEmbedding` is licensed under [Apache 2.0 License](https://github.com/netease-youdao/BCEmbedding/blob/master/LICENSE) ## 🔗 Related Links [Netease Youdao - QAnything](https://github.com/netease-youdao/qanything) [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) [MTEB](https://github.com/embeddings-benchmark/mteb) [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) [LLama Index](https://github.com/run-llama/llama_index) | [LlamaIndex Blog](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83)
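As a closing note on the RAG evaluation above: the Hit Rate and MRR metrics defined in the Metrics Definition section can be made concrete with a few lines of plain Python. This is an illustrative sketch only; the function names and toy document ids below are not part of the `BCEmbedding` toolkit.

```python
from typing import List

def hit_rate(retrieved_ids: List[List[str]], relevant_ids: List[str], k: int = 5) -> float:
    """Fraction of queries whose relevant document appears in the top-k retrieved list."""
    hits = sum(1 for retrieved, gold in zip(retrieved_ids, relevant_ids) if gold in retrieved[:k])
    return hits / len(relevant_ids)

def mrr(retrieved_ids: List[List[str]], relevant_ids: List[str]) -> float:
    """Mean of the reciprocal rank of the first relevant document for each query."""
    total = 0.0
    for retrieved, gold in zip(retrieved_ids, relevant_ids):
        rank = next((i + 1 for i, doc_id in enumerate(retrieved) if doc_id == gold), None)
        total += 1.0 / rank if rank is not None else 0.0
    return total / len(relevant_ids)

# Toy example: two queries, top-3 retrieved document ids each (ids are made up).
retrieved = [["d3", "d1", "d7"], ["d5", "d2", "d9"]]
gold = ["d1", "d9"]
print(hit_rate(retrieved, gold, k=3))  # 1.0, both gold documents are in the top-3
print(mrr(retrieved, gold))            # (1/2 + 1/3) / 2 ≈ 0.417
```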
ai-forever/sbert_large_nlu_ru
ai-forever
"2023-10-28T10:40:17Z"
558,575
41
transformers
[ "transformers", "pytorch", "jax", "bert", "feature-extraction", "PyTorch", "Transformers", "ru", "endpoints_compatible", "has_space", "region:us" ]
feature-extraction
"2022-03-02T23:29:05Z"
--- language: - ru tags: - PyTorch - Transformers --- # BERT large model (uncased) for Sentence Embeddings in Russian language. The model is described [in this article](https://habr.com/ru/company/sberdevices/blog/527576/) For better quality, use mean token embeddings. ## Usage (HuggingFace Models Repository) You can use the model directly from the model repository to compute sentence embeddings: ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1) sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9) return sum_embeddings / sum_mask #Sentences we want sentence embeddings for sentences = ['Привет! Как твои дела?', 'А правда, что 42 твое любимое число?'] #Load AutoModel from huggingface model repository tokenizer = AutoTokenizer.from_pretrained("ai-forever/sbert_large_nlu_ru") model = AutoModel.from_pretrained("ai-forever/sbert_large_nlu_ru") #Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=24, return_tensors='pt') #Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) #Perform pooling. In this case, mean pooling sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) ``` # Authors + [SberDevices](https://sberdevices.ru/) Team. + Aleksandr Abramov: [HF profile](https://huggingface.co/Andrilko), [Github](https://github.com/Ab1992ao), [Kaggle Competitions Master](https://www.kaggle.com/andrilko); + Denis Antykhov: [Github](https://github.com/gaphex);
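A small usage follow-up (not part of the original card): once `sentence_embeddings` has been computed as in the snippet above, the sentences can be compared with cosine similarity.

```python
import torch.nn.functional as F

# Continues from the snippet above: `sentence_embeddings` has shape (num_sentences, hidden_size).
similarity = F.cosine_similarity(sentence_embeddings[0], sentence_embeddings[1], dim=0)
print(f"Cosine similarity between the two sentences: {similarity.item():.4f}")
```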
sai17/cards-top_right_swin-tiny-patch4-window7-224-finetuned-v2_more_data
sai17
"2024-02-20T17:23:19Z"
549,700
0
transformers
[ "transformers", "tensorboard", "safetensors", "swin", "image-classification", "generated_from_trainer", "dataset:imagefolder", "base_model:microsoft/swin-tiny-patch4-window7-224", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2024-02-19T09:26:21Z"
--- license: apache-2.0 base_model: microsoft/swin-tiny-patch4-window7-224 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: cards-top_right_swin-tiny-patch4-window7-224-finetuned-v2_more_data results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: test args: default metrics: - name: Accuracy type: accuracy value: 0.6269272417882741 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cards-top_right_swin-tiny-patch4-window7-224-finetuned-v2_more_data This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.9268 - Accuracy: 0.6269 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 1.4585 | 1.0 | 1363 | 1.2999 | 0.4337 | | 1.4211 | 2.0 | 2726 | 1.1663 | 0.4927 | | 1.4203 | 3.0 | 4089 | 1.0770 | 0.5312 | | 1.4669 | 4.0 | 5453 | 1.0744 | 0.5496 | | 1.3781 | 5.0 | 6816 | 1.0245 | 0.5599 | | 1.3852 | 6.0 | 8179 | 1.0645 | 0.5402 | | 1.3407 | 7.0 | 9542 | 1.0011 | 0.5696 | | 1.3727 | 8.0 | 10906 | 0.9898 | 0.5801 | | 1.328 | 9.0 | 12269 | 0.9965 | 0.5738 | | 1.3374 | 10.0 | 13632 | 0.9722 | 0.5874 | | 1.3513 | 11.0 | 14995 | 0.9632 | 0.5873 | | 1.3728 | 12.0 | 16359 | 0.9818 | 0.5802 | | 1.3289 | 13.0 | 17722 | 0.9845 | 0.5729 | | 1.3219 | 14.0 | 19085 | 0.9633 | 0.5881 | | 1.2893 | 15.0 | 20448 | 0.9312 | 0.6004 | | 1.3088 | 16.0 | 21812 | 0.9537 | 0.5903 | | 1.3252 | 17.0 | 23175 | 0.9432 | 0.5986 | | 1.3424 | 18.0 | 24538 | 0.9291 | 0.5979 | | 1.3077 | 19.0 | 25901 | 0.9245 | 0.6020 | | 1.2466 | 20.0 | 27265 | 0.9304 | 0.6039 | | 1.2767 | 21.0 | 28628 | 0.9122 | 0.6099 | | 1.2553 | 22.0 | 29991 | 0.9312 | 0.6005 | | 1.2698 | 23.0 | 31354 | 0.9137 | 0.6092 | | 1.2591 | 24.0 | 32718 | 0.9113 | 0.6134 | | 1.277 | 25.0 | 34081 | 0.9095 | 0.6142 | | 1.2742 | 26.0 | 35444 | 0.9227 | 0.6100 | | 1.222 | 27.0 | 36807 | 0.9090 | 0.6147 | | 1.2368 | 28.0 | 38171 | 0.9020 | 0.6172 | | 1.198 | 29.0 | 39534 | 0.9071 | 0.6157 | | 1.2076 | 30.0 | 40897 | 0.9031 | 0.6214 | | 1.212 | 31.0 | 42260 | 0.9136 | 0.6175 | | 1.2105 | 32.0 | 43624 | 0.9170 | 0.6151 | | 1.2687 | 33.0 | 44987 | 0.9047 | 0.6186 | | 1.2038 | 34.0 | 46350 | 0.9061 | 0.6190 | | 1.1957 | 35.0 | 47713 | 0.9052 | 0.6255 | | 1.1962 | 36.0 | 49077 | 0.9057 | 0.6210 | | 1.1866 | 37.0 | 50440 | 0.9105 | 0.6227 | | 1.2545 | 38.0 | 51803 | 0.9173 | 0.6206 | | 1.1642 | 39.0 | 53166 | 0.9120 | 0.6239 | | 1.1711 | 40.0 | 54530 | 0.9235 | 0.6177 | | 1.2339 | 41.0 | 55893 | 0.9295 | 0.6143 | | 1.1132 | 42.0 | 57256 | 0.9143 | 0.6234 | | 1.1977 | 43.0 | 58619 | 0.9163 | 0.6256 | | 1.1617 
| 44.0 | 59983 | 0.9246 | 0.6233 | | 1.1357 | 45.0 | 61346 | 0.9196 | 0.6255 | | 1.1362 | 46.0 | 62709 | 0.9221 | 0.6259 | | 1.1472 | 47.0 | 64072 | 0.9206 | 0.6263 | | 1.184 | 48.0 | 65436 | 0.9282 | 0.6256 | | 1.1096 | 49.0 | 66799 | 0.9252 | 0.6269 | | 1.1425 | 49.99 | 68150 | 0.9268 | 0.6269 | ### Framework versions - Transformers 4.37.2 - Pytorch 2.0.1+cu117 - Datasets 2.17.0 - Tokenizers 0.15.2
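The card does not show how to run inference. A minimal sketch (not part of the original card) using the standard `transformers` image-classification pipeline could look as follows; the image path is a placeholder and the class labels depend on the undocumented training data.

```python
from PIL import Image
from transformers import pipeline

# Hypothetical usage sketch; "example_card_crop.jpg" is a placeholder image path.
classifier = pipeline(
    "image-classification",
    model="sai17/cards-top_right_swin-tiny-patch4-window7-224-finetuned-v2_more_data",
)

image = Image.open("example_card_crop.jpg")
predictions = classifier(image, top_k=3)
for p in predictions:
    print(p["label"], round(p["score"], 4))
```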
sai17/cards_bottom_right_swin-tiny-patch4-window7-224-finetuned-v2
sai17
"2024-02-17T01:44:22Z"
549,253
0
transformers
[ "transformers", "tensorboard", "safetensors", "swin", "image-classification", "generated_from_trainer", "dataset:imagefolder", "base_model:microsoft/swin-tiny-patch4-window7-224", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2024-02-15T15:30:28Z"
--- license: apache-2.0 base_model: microsoft/swin-tiny-patch4-window7-224 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: cards_bottom_right_swin-tiny-patch4-window7-224-finetuned-v2 results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: test args: default metrics: - name: Accuracy type: accuracy value: 0.6078575555438837 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cards_bottom_right_swin-tiny-patch4-window7-224-finetuned-v2 This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.9317 - Accuracy: 0.6079 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 1.4965 | 1.0 | 1338 | 1.3516 | 0.4156 | | 1.4486 | 2.0 | 2677 | 1.1784 | 0.4938 | | 1.4384 | 3.0 | 4015 | 1.1050 | 0.5223 | | 1.4538 | 4.0 | 5354 | 1.0751 | 0.5433 | | 1.3928 | 5.0 | 6692 | 1.0604 | 0.5440 | | 1.4148 | 6.0 | 8031 | 1.0459 | 0.5523 | | 1.3921 | 7.0 | 9369 | 1.0464 | 0.5501 | | 1.3812 | 8.0 | 10708 | 1.0461 | 0.5491 | | 1.3494 | 9.0 | 12046 | 1.0445 | 0.5486 | | 1.3555 | 10.0 | 13385 | 0.9973 | 0.5693 | | 1.3303 | 11.0 | 14723 | 0.9952 | 0.5719 | | 1.3575 | 12.0 | 16062 | 1.0317 | 0.5574 | | 1.3129 | 13.0 | 17400 | 0.9851 | 0.5813 | | 1.3439 | 14.0 | 18739 | 1.0510 | 0.5523 | | 1.3371 | 15.0 | 20077 | 0.9820 | 0.5795 | | 1.2835 | 16.0 | 21416 | 0.9886 | 0.5738 | | 1.3002 | 17.0 | 22754 | 0.9685 | 0.5869 | | 1.289 | 18.0 | 24093 | 0.9519 | 0.5941 | | 1.3007 | 19.0 | 25431 | 0.9855 | 0.5800 | | 1.2927 | 20.0 | 26770 | 0.9499 | 0.5925 | | 1.2985 | 21.0 | 28108 | 0.9669 | 0.5854 | | 1.2957 | 22.0 | 29447 | 0.9551 | 0.5903 | | 1.2579 | 23.0 | 30785 | 0.9300 | 0.6053 | | 1.2475 | 24.0 | 32124 | 0.9296 | 0.6049 | | 1.2227 | 25.0 | 33462 | 0.9317 | 0.6079 | | 1.2069 | 26.0 | 34801 | 0.9609 | 0.5887 | | 1.2156 | 27.0 | 36139 | 0.9297 | 0.6052 | | 1.25 | 28.0 | 37478 | 0.9300 | 0.6062 | | 1.2394 | 29.0 | 38816 | 0.9238 | 0.6071 | | 1.209 | 29.99 | 40140 | 0.9284 | 0.6064 | ### Framework versions - Transformers 4.37.2 - Pytorch 2.0.1+cu117 - Datasets 2.17.0 - Tokenizers 0.15.2
google-t5/t5-large
google-t5
"2023-04-06T13:42:27Z"
545,857
141
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "t5", "text2text-generation", "summarization", "translation", "en", "fr", "ro", "de", "multilingual", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
translation
"2022-03-02T23:29:04Z"
--- language: - en - fr - ro - de - multilingual license: apache-2.0 tags: - summarization - translation datasets: - c4 --- # Model Card for T5 Large ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876727255704b657944387750412e706e67) # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Training Details](#training-details) 5. [Evaluation](#evaluation) 6. [Environmental Impact](#environmental-impact) 7. [Citation](#citation) 8. [Model Card Authors](#model-card-authors) 9. [How To Get Started With the Model](#how-to-get-started-with-the-model) # Model Details ## Model Description The developers of the Text-To-Text Transfer Transformer (T5) [write](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html): > With T5, we propose reframing all NLP tasks into a unified text-to-text-format where the input and output are always text strings, in contrast to BERT-style models that can only output either a class label or a span of the input. Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task. T5-Large is the checkpoint with 770 million parameters. - **Developed by:** Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. See [associated paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) and [GitHub repo](https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints) - **Model type:** Language model - **Language(s) (NLP):** English, French, Romanian, German - **License:** Apache 2.0 - **Related Models:** [All T5 Checkpoints](https://huggingface.co/models?search=t5) - **Resources for more information:** - [Research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) - [Google's T5 Blog Post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) - [GitHub Repo](https://github.com/google-research/text-to-text-transfer-transformer) - [Hugging Face T5 Docs](https://huggingface.co/docs/transformers/model_doc/t5) # Uses ## Direct Use and Downstream Use The developers write in a [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) that the model: > Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task, including machine translation, document summarization, question answering, and classification tasks (e.g., sentiment analysis). We can even apply T5 to regression tasks by training it to predict the string representation of a number instead of the number itself. See the [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) and [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. ## Out-of-Scope Use More information needed. # Bias, Risks, and Limitations More information needed. ## Recommendations More information needed. # Training Details ## Training Data The model is pre-trained on the [Colossal Clean Crawled Corpus (C4)](https://www.tensorflow.org/datasets/catalog/c4), which was developed and released in the context of the same [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) as T5. The model was pre-trained on a on a **multi-task mixture of unsupervised (1.) and supervised tasks (2.)**. 
Thereby, the following datasets were being used for (1.) and (2.): 1. **Datasets used for Unsupervised denoising objective**: - [C4](https://huggingface.co/datasets/c4) - [Wiki-DPR](https://huggingface.co/datasets/wiki_dpr) 2. **Datasets used for Supervised text-to-text language modeling objective** - Sentence acceptability judgment - CoLA [Warstadt et al., 2018](https://arxiv.org/abs/1805.12471) - Sentiment analysis - SST-2 [Socher et al., 2013](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) - Paraphrasing/sentence similarity - MRPC [Dolan and Brockett, 2005](https://aclanthology.org/I05-5002) - STS-B [Ceret al., 2017](https://arxiv.org/abs/1708.00055) - QQP [Iyer et al., 2017](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) - Natural language inference - MNLI [Williams et al., 2017](https://arxiv.org/abs/1704.05426) - QNLI [Rajpurkar et al.,2016](https://arxiv.org/abs/1606.05250) - RTE [Dagan et al., 2005](https://link.springer.com/chapter/10.1007/11736790_9) - CB [De Marneff et al., 2019](https://semanticsarchive.net/Archive/Tg3ZGI2M/Marneffe.pdf) - Sentence completion - COPA [Roemmele et al., 2011](https://www.researchgate.net/publication/221251392_Choice_of_Plausible_Alternatives_An_Evaluation_of_Commonsense_Causal_Reasoning) - Word sense disambiguation - WIC [Pilehvar and Camacho-Collados, 2018](https://arxiv.org/abs/1808.09121) - Question answering - MultiRC [Khashabi et al., 2018](https://aclanthology.org/N18-1023) - ReCoRD [Zhang et al., 2018](https://arxiv.org/abs/1810.12885) - BoolQ [Clark et al., 2019](https://arxiv.org/abs/1905.10044) ## Training Procedure In their [abstract](https://jmlr.org/papers/volume21/20-074/20-074.pdf), the model developers write: > In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. The framework introduced, the T5 framework, involves a training procedure that brings together the approaches studied in the paper. See the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. # Evaluation ## Testing Data, Factors & Metrics The developers evaluated the model on 24 tasks, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for full details. ## Results For full results for T5-Large, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf), Table 14. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Google Cloud TPU Pods - **Hours used:** More information needed - **Cloud Provider:** GCP - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Citation **BibTeX:** ```bibtex @article{2020t5, author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. 
Liu}, title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer}, journal = {Journal of Machine Learning Research}, year = {2020}, volume = {21}, number = {140}, pages = {1-67}, url = {http://jmlr.org/papers/v21/20-074.html} } ``` **APA:** - Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., ... & Liu, P. J. (2020). Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140), 1-67. # Model Card Authors This model card was written by the team at Hugging Face. # How to Get Started with the Model Use the code below to get started with the model. <details> <summary> Click to expand </summary> ```python from transformers import T5Tokenizer, T5Model tokenizer = T5Tokenizer.from_pretrained("t5-large") model = T5Model.from_pretrained("t5-large") input_ids = tokenizer( "Studies have been shown that owning a dog is good for you", return_tensors="pt" ).input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 # forward pass outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state ``` See the [Hugging Face T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Model) docs and a [Colab Notebook](https://colab.research.google.com/github/google-research/text-to-text-transfer-transformer/blob/main/notebooks/t5-trivia.ipynb) created by the model developers for more examples. </details>
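To complement the getting-started snippet above, here is a short sketch (not from the original card) of the unified text-to-text interface described earlier, using task prefixes from the T5 paper via the `text2text-generation` pipeline.

```python
from transformers import pipeline

# Illustrative sketch: every task is expressed as "prefix: input text",
# and the model returns plain text for translation, summarization, etc.
t5 = pipeline("text2text-generation", model="t5-large")

print(t5("translate English to German: The house is wonderful.", max_new_tokens=40))
print(t5("summarize: Studies have shown that owning a dog is good for you, "
         "since dog owners tend to be more physically active and less stressed.",
         max_new_tokens=40))
```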
microsoft/deberta-v3-base
microsoft
"2022-09-22T12:34:19Z"
543,576
170
transformers
[ "transformers", "pytorch", "tf", "rust", "deberta-v2", "deberta", "deberta-v3", "fill-mask", "en", "arxiv:2006.03654", "arxiv:2111.09543", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: en
tags:
- deberta
- deberta-v3
- fill-mask
thumbnail: https://huggingface.co/front/thumbnails/microsoft.png
license: mit
---

## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing

[DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and an enhanced mask decoder. With those two improvements, DeBERTa outperforms RoBERTa on a majority of NLU tasks with 80GB training data.

In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-Style pre-training with Gradient Disentangled Embedding Sharing. Compared to DeBERTa, our V3 version significantly improves the model performance on downstream tasks. You can find more technical details about the new model in our [paper](https://arxiv.org/abs/2111.09543).

Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates.

The DeBERTa V3 base model comes with 12 layers and a hidden size of 768. It has only 86M backbone parameters, with a vocabulary containing 128K tokens which introduces 98M parameters in the Embedding layer. This model was trained using the same 160GB data as DeBERTa V2.

#### Fine-tuning on NLU tasks

We present the dev results on SQuAD 2.0 and MNLI tasks.

| Model | Vocabulary(K) | Backbone #Params(M) | SQuAD 2.0(F1/EM) | MNLI-m/mm(ACC) |
|-------------------|----------|-------------------|-----------|----------|
| RoBERTa-base | 50 | 86 | 83.7/80.5 | 87.6/- |
| XLNet-base | 32 | 92 | -/80.2 | 86.8/- |
| ELECTRA-base | 30 | 86 | -/80.5 | 88.8/- |
| DeBERTa-base | 50 | 100 | 86.2/83.1 | 88.8/88.5 |
| DeBERTa-v3-base | 128 | 86 | **88.4/85.4** | **90.6/90.7** |
| DeBERTa-v3-base + SiFT | 128 | 86 | -/- | 91.0/- |

#### Fine-tuning with HF transformers

```bash
#!/bin/bash
cd transformers/examples/pytorch/text-classification/
pip install datasets
export TASK_NAME=mnli

output_dir="ds_results"
num_gpus=8
batch_size=8

python -m torch.distributed.launch --nproc_per_node=${num_gpus} \
  run_glue.py \
  --model_name_or_path microsoft/deberta-v3-base \
  --task_name $TASK_NAME \
  --do_train \
  --do_eval \
  --evaluation_strategy steps \
  --max_seq_length 256 \
  --warmup_steps 500 \
  --per_device_train_batch_size ${batch_size} \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir $output_dir \
  --overwrite_output_dir \
  --logging_steps 1000 \
  --logging_dir $output_dir
```

### Citation

If you find DeBERTa useful for your work, please cite the following papers:

```latex
@misc{he2021debertav3,
      title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing},
      author={Pengcheng He and Jianfeng Gao and Weizhu Chen},
      year={2021},
      eprint={2111.09543},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

```latex
@inproceedings{he2021deberta,
title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION},
author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen},
booktitle={International Conference on Learning Representations},
year={2021},
url={https://openreview.net/forum?id=XPZIaotutsD}
}
```
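In addition to the bash script above, a minimal Python loading sketch (not part of the original card); `num_labels=3` assumes an MNLI-style NLI head.

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Minimal loading sketch; the classification head is randomly initialized until fine-tuning.
tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
model = AutoModelForSequenceClassification.from_pretrained("microsoft/deberta-v3-base", num_labels=3)

enc = tokenizer(
    "The movie was surprisingly good.",  # premise
    "The film was enjoyable.",           # hypothesis
    truncation=True,
    max_length=256,
    return_tensors="pt",
)
logits = model(**enc).logits
print(logits.shape)  # torch.Size([1, 3]); the scores are meaningless before fine-tuning
```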
HuggingFaceM4/tiny-random-LlamaForCausalLM
HuggingFaceM4
"2023-10-17T08:20:32Z"
541,245
16
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-03-16T23:18:46Z"
Entry not found
pysentimiento/robertuito-emotion-analysis
pysentimiento
"2023-02-20T19:04:28Z"
537,942
15
pysentimiento
[ "pysentimiento", "pytorch", "roberta", "emotion-analysis", "twitter", "es", "arxiv:2106.09462", "has_space", "region:us" ]
null
"2022-03-02T23:29:05Z"
--- language: - es library_name: pysentimiento tags: - emotion-analysis - twitter --- # Emotion Analysis in Spanish ## robertuito-emotion-analysis Repository: [https://github.com/pysentimiento/pysentimiento/](https://github.com/finiteautomata/pysentimiento/) Model trained with TASS 2020 Task 2 corpus for Emotion detection in Spanish. Base model is [RoBERTuito](https://github.com/pysentimiento/robertuito), a RoBERTa model trained in Spanish tweets. Contains the six Ekman emotions plus a neutral class: - anger - disgust - fear - joy - sadness - surprise ## Results Results for the four tasks evaluated in `pysentimiento`. Results are expressed as Macro F1 scores | model | emotion | hate_speech | irony | sentiment | |:--------------|:--------------|:--------------|:--------------|:--------------| | robertuito | 0.560 ± 0.010 | 0.759 ± 0.007 | 0.739 ± 0.005 | 0.705 ± 0.003 | | roberta | 0.527 ± 0.015 | 0.741 ± 0.012 | 0.721 ± 0.008 | 0.670 ± 0.006 | | bertin | 0.524 ± 0.007 | 0.738 ± 0.007 | 0.713 ± 0.012 | 0.666 ± 0.005 | | beto_uncased | 0.532 ± 0.012 | 0.727 ± 0.016 | 0.701 ± 0.007 | 0.651 ± 0.006 | | beto_cased | 0.516 ± 0.012 | 0.724 ± 0.012 | 0.705 ± 0.009 | 0.662 ± 0.005 | | mbert_uncased | 0.493 ± 0.010 | 0.718 ± 0.011 | 0.681 ± 0.010 | 0.617 ± 0.003 | | biGRU | 0.264 ± 0.007 | 0.592 ± 0.018 | 0.631 ± 0.011 | 0.585 ± 0.011 | Note that for Hate Speech, these are the results for Semeval 2019, Task 5 Subtask B (HS+TR+AG detection) ## Citation If you use this model in your research, please cite pysentimiento, RoBERTuito and EmoEvent papers: ``` @misc{perez2021pysentimiento, title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks}, author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque}, year={2021}, eprint={2106.09462}, archivePrefix={arXiv}, primaryClass={cs.CL} } @inproceedings{del2020emoevent, title={EmoEvent: A multilingual emotion corpus based on different events}, author={del Arco, Flor Miriam Plaza and Strapparava, Carlo and Lopez, L Alfonso Urena and Mart{\'\i}n-Valdivia, M Teresa}, booktitle={Proceedings of the 12th Language Resources and Evaluation Conference}, pages={1492--1498}, year={2020} } @inproceedings{perez-etal-2022-robertuito, title = "{R}o{BERT}uito: a pre-trained language model for social media text in {S}panish", author = "P{\'e}rez, Juan Manuel and Furman, Dami{\'a}n Ariel and Alonso Alemany, Laura and Luque, Franco M.", booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference", month = jun, year = "2022", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2022.lrec-1.785", pages = "7235--7243", abstract = "Since BERT appeared, Transformer language models and transfer learning have become state-of-the-art for natural language processing tasks. Recently, some works geared towards pre-training specially-crafted models for particular domains, such as scientific papers, medical documents, user-generated texts, among others. These domain-specific models have been shown to improve performance significantly in most tasks; however, for languages other than English, such models are not widely available. In this work, we present RoBERTuito, a pre-trained language model for user-generated text in Spanish, trained on over 500 million tweets. Experiments on a benchmark of tasks involving user-generated text showed that RoBERTuito outperformed other pre-trained language models in Spanish. 
In addition to this, our model has some cross-lingual abilities, achieving top results for English-Spanish tasks of the Linguistic Code-Switching Evaluation benchmark (LinCE) and also competitive performance against monolingual models in English Twitter tasks. To facilitate further research, we make RoBERTuito publicly available at the HuggingFace model hub together with the dataset used to pre-train it.", } ```
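For reference, a short usage sketch (not part of the original card) based on the `pysentimiento` library's `create_analyzer` interface; please check the linked repository for the exact, current API.

```python
from pysentimiento import create_analyzer

# Assumed usage of the pysentimiento API; task/lang names follow the library's documentation.
analyzer = create_analyzer(task="emotion", lang="es")

result = analyzer.predict("¡Qué alegría verte de nuevo!")
print(result.output)  # e.g. "joy"
print(result.probas)  # probabilities over the seven classes listed above
```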
ramsrigouthamg/t5_squad_v1
ramsrigouthamg
"2021-06-23T13:48:31Z"
534,798
7
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-03-02T23:29:05Z"
Entry not found
microsoft/mdeberta-v3-base
microsoft
"2023-04-06T05:32:33Z"
528,008
122
transformers
[ "transformers", "pytorch", "tf", "deberta-v2", "deberta", "deberta-v3", "mdeberta", "fill-mask", "multilingual", "en", "ar", "bg", "de", "el", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh", "arxiv:2006.03654", "arxiv:2111.09543", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
--- language: - multilingual - en - ar - bg - de - el - es - fr - hi - ru - sw - th - tr - ur - vi - zh tags: - deberta - deberta-v3 - mdeberta - fill-mask thumbnail: https://huggingface.co/front/thumbnails/microsoft.png license: mit --- ## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing [DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and enhanced mask decoder. With those two improvements, DeBERTa out perform RoBERTa on a majority of NLU tasks with 80GB training data. In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-Style pre-training with Gradient Disentangled Embedding Sharing. Compared to DeBERTa, our V3 version significantly improves the model performance on downstream tasks. You can find more technique details about the new model from our [paper](https://arxiv.org/abs/2111.09543). Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates. mDeBERTa is multilingual version of DeBERTa which use the same structure as DeBERTa and was trained with CC100 multilingual data. The mDeBERTa V3 base model comes with 12 layers and a hidden size of 768. It has 86M backbone parameters with a vocabulary containing 250K tokens which introduces 190M parameters in the Embedding layer. This model was trained using the 2.5T CC100 data as XLM-R. #### Fine-tuning on NLU tasks We present the dev results on XNLI with zero-shot cross-lingual transfer setting, i.e. training with English data only, test on other languages. | Model |avg | en | fr| es | de | el | bg | ru |tr |ar |vi | th | zh | hi | sw | ur | |--------------| ----|----|----|---- |-- |-- |-- | -- |-- |-- |-- | -- | -- | -- | -- | -- | | XLM-R-base |76.2 |85.8|79.7|80.7 |78.7 |77.5 |79.6 |78.1 |74.2 |73.8 |76.5 |74.6 |76.7| 72.4| 66.5| 68.3| | mDeBERTa-base|**79.8**+/-0.2|**88.2**|**82.6**|**84.4** |**82.7** |**82.3** |**82.4** |**80.8** |**79.5** |**78.5** |**78.1** |**76.4** |**79.5**| **75.9**| **73.9**| **72.4**| #### Fine-tuning with HF transformers ```bash #!/bin/bash cd transformers/examples/pytorch/text-classification/ pip install datasets output_dir="ds_results" num_gpus=8 batch_size=4 python -m torch.distributed.launch --nproc_per_node=${num_gpus} \ run_xnli.py \ --model_name_or_path microsoft/mdeberta-v3-base \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --train_language en \ --language en \ --evaluation_strategy steps \ --max_seq_length 256 \ --warmup_steps 3000 \ --per_device_train_batch_size ${batch_size} \ --learning_rate 2e-5 \ --num_train_epochs 6 \ --output_dir $output_dir \ --overwrite_output_dir \ --logging_steps 1000 \ --logging_dir $output_dir ``` ### Citation If you find DeBERTa useful for your work, please cite the following papers: ``` latex @misc{he2021debertav3, title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing}, author={Pengcheng He and Jianfeng Gao and Weizhu Chen}, year={2021}, eprint={2111.09543}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ``` latex @inproceedings{ he2021deberta, title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION}, author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen}, booktitle={International Conference on Learning Representations}, year={2021}, url={https://openreview.net/forum?id=XPZIaotutsD} } ```
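As with the English DeBERTa card above, a minimal Python loading sketch (not part of the original card); `num_labels=3` assumes an XNLI-style NLI head, and the premise/hypothesis pair can mix languages because mDeBERTa uses a single shared 250K vocabulary.

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Minimal loading sketch for XNLI-style fine-tuning; the head is randomly initialized.
tokenizer = AutoTokenizer.from_pretrained("microsoft/mdeberta-v3-base")
model = AutoModelForSequenceClassification.from_pretrained("microsoft/mdeberta-v3-base", num_labels=3)

# Premise in German, hypothesis in English: both go through the same tokenizer.
enc = tokenizer(
    "Angela Merkel ist eine Politikerin in Deutschland.",
    "Angela Merkel is a politician.",
    truncation=True,
    max_length=256,
    return_tensors="pt",
)
print(model(**enc).logits.shape)  # torch.Size([1, 3])
```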
bhadresh-savani/distilbert-base-uncased-emotion
bhadresh-savani
"2023-03-22T08:44:05Z"
526,039
96
transformers
[ "transformers", "pytorch", "tf", "jax", "distilbert", "text-classification", "emotion", "en", "dataset:emotion", "arxiv:1910.01108", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: - en license: apache-2.0 tags: - text-classification - emotion - pytorch datasets: - emotion metrics: - Accuracy, F1 Score thumbnail: https://avatars3.githubusercontent.com/u/32437151?s=460&u=4ec59abc8d21d5feea3dab323d23a5860e6996a4&v=4 model-index: - name: bhadresh-savani/distilbert-base-uncased-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: default split: test metrics: - type: accuracy value: 0.927 name: Accuracy verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYzQxOGRmMjFlZThmZWViNjNmNGMzMTdjMGNjYjg1YWUzOTI0ZDlmYjRhYWMzMDA3Yjg2N2FiMTdmMzk0ZjJkOSIsInZlcnNpb24iOjF9.mOqr-hgNrnle7WCPy3Mo7M3fITFppn5gjpNagGMf_TZfB6VZnPKfZ51UkNFQlBtUlcm0U8vwPkF79snxwvCoDw - type: precision value: 0.8880230732280744 name: Precision Macro verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYjZiN2NjNTkyN2M3ZWM2ZDZiNDk1OWZhN2FmNTAwZDIzMmQ3NTU2Yjk2MTgyNjJmMTNjYTYzOTc1NDdhYTljYSIsInZlcnNpb24iOjF9.0rWHmCZ2PyZ5zYkSeb_tFdQG9CHS5PdpOZ9kOfrIzEXyZ968daayaOJi2d6iO84fnauE5hZiIAUPsx24Vr4nBA - type: precision value: 0.927 name: Precision Micro verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZmRhNWM1NDQ4ZjkyYjAxYjQ5MzQzMDA1ZDIzYWU3YTE4NTI2ZTMwYWI2ZWQ4NzQ3YzJkODYzMmZhZDI1NGRlNCIsInZlcnNpb24iOjF9.NlII1s42Mr_DMzPEoR0ntyh5cDW0405TxVkWhCgXLJTFAdnivH54-zZY4av1U5jHPTeXeWwZrrrbMwHCRBkoCw - type: precision value: 0.9272902840835793 name: Precision Weighted verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiODhkNmM5NmYyMzA4MjkwOTllZDgyMDQ1NzZkN2QzOTAyOTMyNGFlZTU4NzM5NmM5NWQ1YmUxYmRmNjA5YjhhNCIsInZlcnNpb24iOjF9.oIn1KT-BOpFNLXiKL29frMvgHhWZMHWc9Q5WgeR7UaMEO7smkK8J3j5HAMy17Ktjv2dh783-f76N6gyJ_NewCg - type: recall value: 0.8790126653780703 name: Recall Macro verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYjhlNzczNDY2NDVlM2UwMjAzOWQxYTAyNWZkNGZlYmNjODNiZTEzMTcxNTE3MTAxNjNkOTFiMmRiMzViMzJmZiIsInZlcnNpb24iOjF9.AXp7omMuUZFJ6mzAVTQPMke7QoUtoi4RJSSE7Xbnp2pNi7y-JtznKdm---l6RfqcHPlI0jWr7TVGoFsWZ64YAg - type: recall value: 0.927 name: Recall Micro verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjEyYmZiZDQ4MzM1ZmQ2ZmJhZWU4OTVkNmViYjA5NzhiN2MxODE0MzUxZTliZTk0MzViZDAyNGU4MDFjYjM1MSIsInZlcnNpb24iOjF9.9lazxLXbPOdwhqoYtIudwRwjfNVZnUu7KvGRklRP_RAoQStAzgmWMIrT3ckX_d5_6bKZH9fIdujUn5Qz-baKBw - type: recall value: 0.927 name: Recall Weighted verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMWVhMzY0YTA4YmQzYTg4YTBiMzQ5YzRiZWJhMjM1NjUzZGQxZmQ5M2NkZDcyNTQ0ZmJjN2NkY2ZiYjg0OWI0ZCIsInZlcnNpb24iOjF9.QgTv726WCTyvrEct0NM8Zpc3vUnDbIwCor9EH941-zpJtuWr-xpdZzYZFJfILkVA0UUn1y6Jz_ABfkfBeyZTBg - type: f1 value: 0.8825061528287809 name: F1 Macro verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNzQzZTJkMDAwOTUwMzY3ZjI2MjIxYjlmZTg3YTdhNTc4ZjYyMmQ2NDQzM2FmYzk3OGEzNjhhMTk3NTQ3OTlhNyIsInZlcnNpb24iOjF9.hSln1KfKm0plK7Qao9vlubFtAl1M7_UYHNM6La9gEZlW_apnU1Mybz03GT2XZORgOVPe9JmgygvZByxQhpsYBw - type: f1 value: 0.927 name: F1 Micro verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNzljODQ3NjE3MDRkODE3ZjFlZmY5MjYyOGJlNDQ4YzdlZGRiMTI5OGZiZWM2ODkyZjMyZWQ3MTkzYWU5YThkOCIsInZlcnNpb24iOjF9.7qfBw39fv22jSIJoY71DkOVr9eBB-srhqSi09bCcUC7Huok4O2Z_vB7gO_Rahh9sFgKVu1ZATusjTmOLQr0fBw - type: f1 value: 0.926876082854655 name: F1 Weighted verified: true verifyToken: 
eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjJhN2UzODgxOWQ0Y2E3YTcwZTQxMDE0ZWRmYThjOWVhYWQ1YjBhMzk0YWUxNzE2ZjFhNWM5ZmE2ZmI1YTczYSIsInZlcnNpb24iOjF9.nZW0dBdLmh_FgNw6GaITvSJFX-2C_Iku3NanU8Rip7FSiRHozKPAjothdQh9MWQnq158ZZGPPVIjtyIvuTSqCw - type: loss value: 0.17403268814086914 name: loss verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMTVjZmFiOGQwZGY1OTU5YWFkNGZjMTlhOGI4NjE3MGI4ZDhkODcxYmJiYTQ3NWNmMWM0ODUyZDI1MThkYTY3ZSIsInZlcnNpb24iOjF9.OYz5BI3Lz8LgjAqVnD6NcrG3UAG0D3wjKJ7G5298RRGaNpb621ycisG_7UYiWixY7e2RJafkfRiplmkdczIFDQ --- # Distilbert-base-uncased-emotion ## Model description: [Distilbert](https://arxiv.org/abs/1910.01108) is created with knowledge distillation during the pre-training phase which reduces the size of a BERT model by 40%, while retaining 97% of its language understanding. It's smaller, faster than Bert and any other Bert-based model. [Distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) finetuned on the emotion dataset using HuggingFace Trainer with below Hyperparameters ``` learning rate 2e-5, batch size 64, num_train_epochs=8, ``` ## Model Performance Comparision on Emotion Dataset from Twitter: | Model | Accuracy | F1 Score | Test Sample per Second | | --- | --- | --- | --- | | [Distilbert-base-uncased-emotion](https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion) | 93.8 | 93.79 | 398.69 | | [Bert-base-uncased-emotion](https://huggingface.co/bhadresh-savani/bert-base-uncased-emotion) | 94.05 | 94.06 | 190.152 | | [Roberta-base-emotion](https://huggingface.co/bhadresh-savani/roberta-base-emotion) | 93.95 | 93.97| 195.639 | | [Albert-base-v2-emotion](https://huggingface.co/bhadresh-savani/albert-base-v2-emotion) | 93.6 | 93.65 | 182.794 | ## How to Use the model: ```python from transformers import pipeline classifier = pipeline("text-classification",model='bhadresh-savani/distilbert-base-uncased-emotion', return_all_scores=True) prediction = classifier("I love using transformers. The best part is wide range of support and its easy to use", ) print(prediction) """ Output: [[ {'label': 'sadness', 'score': 0.0006792712374590337}, {'label': 'joy', 'score': 0.9959300756454468}, {'label': 'love', 'score': 0.0009452480007894337}, {'label': 'anger', 'score': 0.0018055217806249857}, {'label': 'fear', 'score': 0.00041110432357527316}, {'label': 'surprise', 'score': 0.0002288572577526793} ]] """ ``` ## Dataset: [Twitter-Sentiment-Analysis](https://huggingface.co/nlp/viewer/?dataset=emotion). ## Training procedure [Colab Notebook](https://github.com/bhadreshpsavani/ExploringSentimentalAnalysis/blob/main/SentimentalAnalysisWithDistilbert.ipynb) ## Eval results ```json { 'test_accuracy': 0.938, 'test_f1': 0.937932884041714, 'test_loss': 0.1472451239824295, 'test_mem_cpu_alloc_delta': 0, 'test_mem_cpu_peaked_delta': 0, 'test_mem_gpu_alloc_delta': 0, 'test_mem_gpu_peaked_delta': 163454464, 'test_runtime': 5.0164, 'test_samples_per_second': 398.69 } ``` ## Reference: * [Natural Language Processing with Transformer By Lewis Tunstall, Leandro von Werra, Thomas Wolf](https://learning.oreilly.com/library/view/natural-language-processing/9781098103231/)
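The eval results above can be approximated with a short script; the dataset id and label handling below are assumptions based on the card's `emotion` dataset tag, not the exact evaluation code used by the author.

```python
from datasets import load_dataset
from transformers import pipeline

# Hedged evaluation sketch; "emotion" and its label names are assumed from the card's dataset tag.
test_set = load_dataset("emotion", split="test")
classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")

label_names = test_set.features["label"].names  # class names such as 'sadness', 'joy', ...
predictions = classifier(test_set["text"], batch_size=64, truncation=True)
accuracy = sum(
    pred["label"] == label_names[gold]
    for pred, gold in zip(predictions, test_set["label"])
) / len(test_set)
print(f"test accuracy ≈ {accuracy:.3f}")
```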
MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7
MoritzLaurer
"2024-04-11T13:49:19Z"
525,874
202
transformers
[ "transformers", "pytorch", "onnx", "safetensors", "deberta-v2", "text-classification", "zero-shot-classification", "nli", "multilingual", "zh", "ja", "ar", "ko", "de", "fr", "es", "pt", "hi", "id", "it", "tr", "ru", "bn", "ur", "mr", "ta", "vi", "fa", "pl", "uk", "nl", "sv", "he", "sw", "ps", "dataset:MoritzLaurer/multilingual-NLI-26lang-2mil7", "dataset:xnli", "dataset:multi_nli", "dataset:facebook/anli", "dataset:fever", "dataset:lingnli", "dataset:alisawuffles/WANLI", "arxiv:2111.09543", "arxiv:2104.07179", "arxiv:1809.05053", "arxiv:1911.02116", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
zero-shot-classification
"2022-08-22T16:59:35Z"
--- language: - multilingual - zh - ja - ar - ko - de - fr - es - pt - hi - id - it - tr - ru - bn - ur - mr - ta - vi - fa - pl - uk - nl - sv - he - sw - ps license: mit tags: - zero-shot-classification - text-classification - nli - pytorch datasets: - MoritzLaurer/multilingual-NLI-26lang-2mil7 - xnli - multi_nli - facebook/anli - fever - lingnli - alisawuffles/WANLI metrics: - accuracy pipeline_tag: zero-shot-classification widget: - text: Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU candidate_labels: politics, economy, entertainment, environment model-index: - name: DeBERTa-v3-base-xnli-multilingual-nli-2mil7 results: - task: type: text-classification name: Natural Language Inference dataset: name: MultiNLI-matched type: multi_nli split: validation_matched metrics: - type: accuracy value: 0,857 verified: false - task: type: text-classification name: Natural Language Inference dataset: name: MultiNLI-mismatched type: multi_nli split: validation_mismatched metrics: - type: accuracy value: 0,856 verified: false - task: type: text-classification name: Natural Language Inference dataset: name: ANLI-all type: anli split: test_r1+test_r2+test_r3 metrics: - type: accuracy value: 0,537 verified: false - task: type: text-classification name: Natural Language Inference dataset: name: ANLI-r3 type: anli split: test_r3 metrics: - type: accuracy value: 0,497 verified: false - task: type: text-classification name: Natural Language Inference dataset: name: WANLI type: alisawuffles/WANLI split: test metrics: - type: accuracy value: 0,732 verified: false - task: type: text-classification name: Natural Language Inference dataset: name: LingNLI type: lingnli split: test metrics: - type: accuracy value: 0,788 verified: false - task: type: text-classification name: Natural Language Inference dataset: name: fever-nli type: fever-nli split: test metrics: - type: accuracy value: 0,761 verified: false --- # Model card for mDeBERTa-v3-base-xnli-multilingual-nli-2mil7 ## Model description This multilingual model can perform natural language inference (NLI) on 100 languages and is therefore also suitable for multilingual zero-shot classification. The underlying mDeBERTa-v3-base model was pre-trained by Microsoft on the [CC100 multilingual dataset](https://huggingface.co/datasets/cc100) with 100 languages. The model was then fine-tuned on the [XNLI dataset](https://huggingface.co/datasets/xnli) and on the [multilingual-NLI-26lang-2mil7 dataset](https://huggingface.co/datasets/MoritzLaurer/multilingual-NLI-26lang-2mil7). Both datasets contain more than 2.7 million hypothesis-premise pairs in 27 languages spoken by more than 4 billion people. As of December 2021, mDeBERTa-v3-base is the best performing multilingual base-sized transformer model introduced by Microsoft in [this paper](https://arxiv.org/pdf/2111.09543.pdf). 
### How to use the model #### Simple zero-shot classification pipeline ```python from transformers import pipeline classifier = pipeline("zero-shot-classification", model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli") sequence_to_classify = "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU" candidate_labels = ["politics", "economy", "entertainment", "environment"] output = classifier(sequence_to_classify, candidate_labels, multi_label=False) print(output) ``` #### NLI use-case ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model_name = "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSequenceClassification.from_pretrained(model_name) premise = "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU" hypothesis = "Emmanuel Macron is the President of France" input = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt") output = model(input["input_ids"].to(device)) # device = "cuda:0" or "cpu" prediction = torch.softmax(output["logits"][0], -1).tolist() label_names = ["entailment", "neutral", "contradiction"] prediction = {name: round(float(pred) * 100, 1) for pred, name in zip(prediction, label_names)} print(prediction) ``` ### Training data This model was trained on the [multilingual-nli-26lang-2mil7 dataset](https://huggingface.co/datasets/MoritzLaurer/multilingual-NLI-26lang-2mil7) and the [XNLI](https://huggingface.co/datasets/xnli) validation dataset. The multilingual-nli-26lang-2mil7 dataset contains 2 730 000 NLI hypothesis-premise pairs in 26 languages spoken by more than 4 billion people. The dataset contains 105 000 text pairs per language. It is based on the English datasets [MultiNLI](https://huggingface.co/datasets/multi_nli), [Fever-NLI](https://github.com/easonnie/combine-FEVER-NSMN/blob/master/other_resources/nli_fever.md), [ANLI](https://huggingface.co/datasets/anli), [LingNLI](https://arxiv.org/pdf/2104.07179.pdf) and [WANLI](https://huggingface.co/datasets/alisawuffles/WANLI) and was created using the latest open-source machine translation models. The languages in the dataset are: ['ar', 'bn', 'de', 'es', 'fa', 'fr', 'he', 'hi', 'id', 'it', 'ja', 'ko', 'mr', 'nl', 'pl', 'ps', 'pt', 'ru', 'sv', 'sw', 'ta', 'tr', 'uk', 'ur', 'vi', 'zh'] (see [ISO language codes](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes). For more details, see the [datasheet](XXX). In addition, a sample of 105 000 text pairs was also added for English following the same sampling method as the other languages, leading to 27 languages. Moreover, for each language a random set of 10% of the hypothesis-premise pairs was added where an English hypothesis was paired with the premise in the other language (and the same for English premises and other language hypotheses). This mix of languages in the text pairs should enable users to formulate a hypothesis in English for a target text in another language. The [XNLI](https://huggingface.co/datasets/xnli) validation set consists of 2490 professionally translated texts from English to 14 other languages (37350 texts in total) (see [this paper](https://arxiv.org/pdf/1809.05053.pdf)). 
Note that XNLI also contains a training set of 14 machine translated versions of the MultiNLI dataset for 14 languages, but this data was excluded due to quality issues with the machine translations from 2018.

Note that for evaluation purposes, three languages were excluded from the XNLI training data and only included in the test data: ["bg","el","th"]. This was done in order to test the performance of the model on languages it has not seen during NLI fine-tuning on 27 languages, but only during pre-training on 100 languages - see evaluation metrics below.

The total training dataset had a size of 3 287 280 hypothesis-premise pairs.

### Training procedure

mDeBERTa-v3-base-mnli-xnli was trained using the Hugging Face trainer with the following hyperparameters.

```
training_args = TrainingArguments(
    num_train_epochs=3,              # total number of training epochs
    learning_rate=2e-05,
    per_device_train_batch_size=32,  # batch size per device during training
    gradient_accumulation_steps=2,   # to double the effective batch size
    warmup_ratio=0.06,               # ratio of warmup steps for the learning rate scheduler
    weight_decay=0.01,               # strength of weight decay
    fp16=False
)
```

### Eval results

The model was evaluated on the XNLI test set in 15 languages (5010 texts per language, 75150 in total) and the English test sets of [MultiNLI](https://huggingface.co/datasets/multi_nli), [Fever-NLI](https://github.com/easonnie/combine-FEVER-NSMN/blob/master/other_resources/nli_fever.md), [ANLI](https://huggingface.co/datasets/anli), [LingNLI](https://arxiv.org/pdf/2104.07179.pdf) and [WANLI](https://huggingface.co/datasets/alisawuffles/WANLI). Note that multilingual NLI models are capable of classifying NLI texts without receiving NLI training data in the specific language (cross-lingual transfer). This means that the model is also able to do NLI on the other 73 languages mDeBERTa was pre-trained on, but performance is most likely lower than for those languages seen during NLI fine-tuning. The performance on the languages ["bg","el","th"] in the table below is a good indicator of this cross-lingual transfer, as these languages were not included in the training data.

|XNLI subsets|ar|bg|de|el|en|es|fr|hi|ru|sw|th|tr|ur|vi|zh|
| :---: |:---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
|Accuracy|0.794|0.822|0.824|0.809|0.871|0.832|0.823|0.769|0.803|0.746|0.786|0.792|0.744|0.793|0.803|
|Speed (text/sec, A100-GPU)|1344.0|1355.0|1472.0|1149.0|1697.0|1446.0|1278.0|1115.0|1380.0|1463.0|1713.0|1594.0|1189.0|877.0|1887.0|

|English Datasets|mnli_test_m|mnli_test_mm|anli_test|anli_test_r3|fever_test|ling_test|wanli_test|
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
|Accuracy|0.857|0.856|0.537|0.497|0.761|0.788|0.732|0.794|
|Speed (text/sec, A100-GPU)|1000.0|1009.0|794.0|672.0|374.0|1177.0|1468.0|

Also note that if other multilingual models on the model hub claim performance of around 90% on languages other than English, the authors have most likely made a mistake during testing, since none of the latest papers show a multilingual average performance of more than a few points above 80% on XNLI (see [here](https://arxiv.org/pdf/2111.09543.pdf) or [here](https://arxiv.org/pdf/1911.02116.pdf)).

## Limitations and bias

Please consult the original DeBERTa-V3 paper and literature on different NLI datasets for potential biases.
Moreover, note that the multilingual-nli-26lang-2mil7 dataset was created using machine translation, which reduces the quality of the data for a complex task like NLI. You can inspect the data via the Hugging Face [dataset viewer](https://huggingface.co/datasets/MoritzLaurer/multilingual-NLI-26lang-2mil7) for languages you are interested in. Note that grammatical errors introduced by machine translation are less of an issue for zero-shot classification, for which grammar is less important.

## Citation

If the dataset is useful for you, please cite the following article:

```
@article{laurer_less_2022,
	title = {Less {Annotating}, {More} {Classifying} – {Addressing} the {Data} {Scarcity} {Issue} of {Supervised} {Machine} {Learning} with {Deep} {Transfer} {Learning} and {BERT} - {NLI}},
	url = {https://osf.io/74b8k},
	language = {en-us},
	urldate = {2022-07-28},
	journal = {Preprint},
	author = {Laurer, Moritz and Atteveldt, Wouter van and Casas, Andreu Salleras and Welbers, Kasper},
	month = jun,
	year = {2022},
	note = {Publisher: Open Science Framework},
}
```

## Ideas for cooperation or questions?

For updates on new models and datasets, follow me on [Twitter](https://twitter.com/MoritzLaurer). If you have questions or ideas for cooperation, contact me at m{dot}laurer{at}vu{dot}nl or on [LinkedIn](https://www.linkedin.com/in/moritz-laurer/).

## Debugging and issues

Note that DeBERTa-v3 was released in late 2021 and older versions of HF Transformers seem to have issues running the model (e.g. resulting in an issue with the tokenizer). Using Transformers==4.13 or higher might solve some issues. Note that mDeBERTa currently does not support FP16, see here: https://github.com/microsoft/DeBERTa/issues/77
philschmid/bart-large-cnn-samsum
philschmid
"2022-12-23T19:48:57Z"
525,246
227
transformers
[ "transformers", "pytorch", "bart", "text2text-generation", "sagemaker", "summarization", "en", "dataset:samsum", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
summarization
"2022-03-02T23:29:05Z"
--- language: en license: mit tags: - sagemaker - bart - summarization datasets: - samsum widget: - text: "Jeff: Can I train a \U0001F917 Transformers model on Amazon SageMaker? \n\ Philipp: Sure you can use the new Hugging Face Deep Learning Container. \nJeff:\ \ ok.\nJeff: and how can I get started? \nJeff: where can I find documentation?\ \ \nPhilipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face\n" model-index: - name: bart-large-cnn-samsum results: - task: type: summarization name: Summarization dataset: name: 'SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization' type: samsum metrics: - type: rogue-1 value: 42.621 name: Validation ROGUE-1 - type: rogue-2 value: 21.9825 name: Validation ROGUE-2 - type: rogue-l value: 33.034 name: Validation ROGUE-L - type: rogue-1 value: 41.3174 name: Test ROGUE-1 - type: rogue-2 value: 20.8716 name: Test ROGUE-2 - type: rogue-l value: 32.1337 name: Test ROGUE-L - task: type: summarization name: Summarization dataset: name: samsum type: samsum config: samsum split: test metrics: - type: rouge value: 41.3282 name: ROUGE-1 verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZTYzNzZkZDUzOWQzNGYxYTJhNGE4YWYyZjA0NzMyOWUzMDNhMmVhYzY1YTM0ZTJhYjliNGE4MDZhMjhhYjRkYSIsInZlcnNpb24iOjF9.OOM6l3v5rJCndmUIJV-2SDh2NjbPo5IgQOSL-Ju1Gwbi1voL5amsDEDOelaqlUBE3n55KkUsMLZhyn66yWxZBQ - type: rouge value: 20.8755 name: ROUGE-2 verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMWZiODFiYWQzY2NmOTc5YjA3NTI0YzQ1MzQ0ODk2NjgyMmVlMjA5MjZiNTJkMGRmZGEzN2M3MDNkMjkxMDVhYSIsInZlcnNpb24iOjF9.b8cPk2-IL24La3Vd0hhtii4tRXujh5urAwy6IVeTWHwYfXaURyC2CcQOWtlOx5bdO5KACeaJFrFBCGgjk-VGCQ - type: rouge value: 32.1353 name: ROUGE-L verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYWNmYzdiYWQ2ZWRkYzRiMGMxNWUwODgwZTdkY2NjZTc1NWE5NTFiMzU0OTU1N2JjN2ExYWQ2NGZkNjk5OTc4YSIsInZlcnNpb24iOjF9.Fzv4p-TEVicljiCqsBJHK1GsnE_AwGqamVmxTPI0WBNSIhZEhliRGmIL_z1pDq6WOzv3GN2YUGvhowU7GxnyAQ - type: rouge value: 38.401 name: ROUGE-LSUM verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNGI4MWY0NWMxMmQ0ODQ5MDhiNDczMDAzYzJkODBiMzgzYWNkMWM2YTZkZDJmNWJiOGQ3MmNjMGViN2UzYWI2ZSIsInZlcnNpb24iOjF9.7lw3h5k5lJ7tYFLZGUtLyDabFYd00l6ByhmvkW4fykocBy9Blyin4tdw4Xps4DW-pmrdMLgidHxBWz5MrSx1Bw - type: loss value: 1.4297215938568115 name: loss verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMzI0ZWNhNDM5YTViZDMyZGJjMDA1ZWFjYzNhOTdlOTFiNzhhMDBjNmM2MjA3ZmRkZjJjMjEyMGY3MzcwOTI2NyIsInZlcnNpb24iOjF9.oNaZsAtUDqGAqoZWJavlcW7PKx1AWsnkbhaQxadpOKk_u7ywJJabvTtzyx_DwEgZslgDETCf4MM-JKitZKjiDA - type: gen_len value: 60.0757 name: gen_len verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYTgwYWYwMDRkNTJkMDM5N2I2MWNmYzQ3OWM1NDJmODUyZGViMGE4ZTdkNmIwYWM2N2VjZDNmN2RiMDE4YTYyYiIsInZlcnNpb24iOjF9.PbXTcNYX_SW-BuRQEcqyc21M7uKrOMbffQSAK6k2GLzTVRrzZxsDC57ktKL68zRY8fSiRGsnknOwv-nAR6YBCQ --- ## `bart-large-cnn-samsum` > If you want to use the model you should try a newer fine-tuned FLAN-T5 version [philschmid/flan-t5-base-samsum](https://huggingface.co/philschmid/flan-t5-base-samsum) out socring the BART version with `+6` on `ROGUE1` achieving `47.24`. # TRY [philschmid/flan-t5-base-samsum](https://huggingface.co/philschmid/flan-t5-base-samsum) This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning container. 
For more information look at: - [🤗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html) - [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker) - [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html) - [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html) - [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) ## Hyperparameters ```json { "dataset_name": "samsum", "do_eval": true, "do_predict": true, "do_train": true, "fp16": true, "learning_rate": 5e-05, "model_name_or_path": "facebook/bart-large-cnn", "num_train_epochs": 3, "output_dir": "/opt/ml/model", "per_device_eval_batch_size": 4, "per_device_train_batch_size": 4, "predict_with_generate": true, "seed": 7 } ``` ## Usage ```python from transformers import pipeline summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum") conversation = '''Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker? Philipp: Sure you can use the new Hugging Face Deep Learning Container. Jeff: ok. Jeff: and how can I get started? Jeff: where can I find documentation? Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face ''' summarizer(conversation) ``` ## Results | key | value | | --- | ----- | | eval_rouge1 | 42.621 | | eval_rouge2 | 21.9825 | | eval_rougeL | 33.034 | | eval_rougeLsum | 39.6783 | | test_rouge1 | 41.3174 | | test_rouge2 | 20.8716 | | test_rougeL | 32.1337 | | test_rougeLsum | 38.4149 |
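The hyperparameters above were passed to a SageMaker training job. As a rough, hypothetical sketch (not the exact script or configuration used for this model), a Hugging Face estimator could be set up as below; the entry point, source directory, IAM role, instance type, and framework versions are placeholder assumptions, so check the Deep Learning Container list linked above for supported combinations:

```python
from sagemaker.huggingface import HuggingFace

# Hypothetical sketch of launching a similar SageMaker training job.
hyperparameters = {
    "dataset_name": "samsum",
    "model_name_or_path": "facebook/bart-large-cnn",
    "num_train_epochs": 3,
    "per_device_train_batch_size": 4,
    "per_device_eval_batch_size": 4,
    "learning_rate": 5e-5,
    "fp16": True,
    "predict_with_generate": True,
    "do_train": True,
    "do_eval": True,
    "output_dir": "/opt/ml/model",
    "seed": 7,
}

estimator = HuggingFace(
    entry_point="run_summarization.py",   # e.g. the official summarization example script (assumption)
    source_dir="./scripts",               # placeholder directory containing the script
    instance_type="ml.p3.2xlarge",        # placeholder instance type
    instance_count=1,
    role="arn:aws:iam::111111111111:role/sagemaker-role",  # placeholder IAM role
    transformers_version="4.26",          # placeholder; must match an available DLC
    pytorch_version="1.13",
    py_version="py39",
    hyperparameters=hyperparameters,
)

estimator.fit()
```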
google/mt5-base
google
"2023-01-24T16:37:25Z"
518,744
156
transformers
[ "transformers", "pytorch", "tf", "jax", "mt5", "text2text-generation", "multilingual", "af", "am", "ar", "az", "be", "bg", "bn", "ca", "ceb", "co", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fil", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "haw", "hi", "hmn", "ht", "hu", "hy", "ig", "is", "it", "iw", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lb", "lo", "lt", "lv", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne", "nl", "no", "ny", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "sm", "sn", "so", "sq", "sr", "st", "su", "sv", "sw", "ta", "te", "tg", "th", "tr", "uk", "und", "ur", "uz", "vi", "xh", "yi", "yo", "zh", "zu", "dataset:mc4", "arxiv:2010.11934", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-03-02T23:29:05Z"
--- language: - multilingual - af - am - ar - az - be - bg - bn - ca - ceb - co - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fil - fr - fy - ga - gd - gl - gu - ha - haw - hi - hmn - ht - hu - hy - ig - is - it - iw - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lb - lo - lt - lv - mg - mi - mk - ml - mn - mr - ms - mt - my - ne - nl - no - ny - pa - pl - ps - pt - ro - ru - sd - si - sk - sl - sm - sn - so - sq - sr - st - su - sv - sw - ta - te - tg - th - tr - uk - und - ur - uz - vi - xh - yi - yo - zh - zu datasets: - mc4 license: apache-2.0 --- [Google's mT5](https://github.com/google-research/multilingual-t5) mT5 is pretrained on the [mC4](https://www.tensorflow.org/datasets/catalog/c4#c4multilingual) corpus, covering 101 languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Azerbaijani, Basque, Belarusian, Bengali, Bulgarian, Burmese, Catalan, Cebuano, Chichewa, Chinese, Corsican, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Haitian Creole, Hausa, Hawaiian, Hebrew, Hindi, Hmong, Hungarian, Icelandic, Igbo, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish, Kyrgyz, Lao, Latin, Latvian, Lithuanian, Luxembourgish, Macedonian, Malagasy, Malay, Malayalam, Maltese, Maori, Marathi, Mongolian, Nepali, Norwegian, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Samoan, Scottish Gaelic, Serbian, Shona, Sindhi, Sinhala, Slovak, Slovenian, Somali, Sotho, Spanish, Sundanese, Swahili, Swedish, Tajik, Tamil, Telugu, Thai, Turkish, Ukrainian, Urdu, Uzbek, Vietnamese, Welsh, West Frisian, Xhosa, Yiddish, Yoruba, Zulu. **Note**: mT5 was only pre-trained on mC4 excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task. Pretraining Dataset: [mC4](https://www.tensorflow.org/datasets/catalog/c4#c4multilingual) Other Community Checkpoints: [here](https://huggingface.co/models?search=mt5) Paper: [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) Authors: *Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel* ## Abstract The recent "Text-to-Text Transfer Transformer" (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We describe the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. All of the code and model checkpoints used in this work are publicly available.
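Since the checkpoint is pretraining-only, it is normally loaded as a starting point for fine-tuning. Below is a minimal sketch (not a recipe from the original card, and assuming a recent version of 🤗 Transformers) of loading the model and preparing one supervised text-to-text training pair; the task prefix and example strings are illustrative:

```python
from transformers import AutoTokenizer, MT5ForConditionalGeneration

# Load the pretrained (not fine-tuned) mT5-Base checkpoint as a starting point for fine-tuning.
tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-base")

# Example of preparing a single training pair for a downstream seq2seq task.
inputs = tokenizer("summarize: Der Eiffelturm ist ein Wahrzeichen von Paris.", return_tensors="pt")
labels = tokenizer(text_target="Der Eiffelturm steht in Paris.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)
print(outputs.loss)  # training loss for this pair; backpropagate during fine-tuning
```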
google-bert/bert-base-multilingual-uncased
google-bert
"2024-02-19T11:06:00Z"
516,117
74
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:04Z"
--- language: - multilingual - af - sq - ar - an - hy - ast - az - ba - eu - bar - be - bn - inc - bs - br - bg - my - ca - ceb - ce - zh - cv - hr - cs - da - nl - en - et - fi - fr - gl - ka - de - el - gu - ht - he - hi - hu - is - io - id - ga - it - ja - jv - kn - kk - ky - ko - la - lv - lt - roa - nds - lm - mk - mg - ms - ml - mr - min - ne - new - nb - nn - oc - fa - pms - pl - pt - pa - ro - ru - sco - sr - hr - scn - sk - sl - aze - es - su - sw - sv - tl - tg - ta - tt - te - tr - uk - ud - uz - vi - vo - war - cy - fry - pnb - yo license: apache-2.0 datasets: - wikipedia --- # BERT multilingual base model (uncased) Pretrained model on the top 102 languages with the largest Wikipedia using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com/google-research/bert). This model is uncased: it does not make a difference between english and English. Disclaimer: The team releasing BERT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description BERT is a transformers model pretrained on a large corpus of multilingual data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Next sentence prediction (NSP): the models concatenates two masked sentences as inputs during pretraining. Sometimes they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to predict if the two sentences were following each other or not. This way, the model learns an inner representation of the languages in the training set that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the BERT model as inputs. ## Intended uses & limitations You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=bert) to look for fine-tuned versions on a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at model like GPT2. ### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='bert-base-multilingual-uncased') >>> unmasker("Hello I'm a [MASK] model.") [{'sequence': "[CLS] hello i'm a top model. 
[SEP]", 'score': 0.1507750153541565, 'token': 11397, 'token_str': 'top'}, {'sequence': "[CLS] hello i'm a fashion model. [SEP]", 'score': 0.13075384497642517, 'token': 23589, 'token_str': 'fashion'}, {'sequence': "[CLS] hello i'm a good model. [SEP]", 'score': 0.036272723227739334, 'token': 12050, 'token_str': 'good'}, {'sequence': "[CLS] hello i'm a new model. [SEP]", 'score': 0.035954564809799194, 'token': 10246, 'token_str': 'new'}, {'sequence': "[CLS] hello i'm a great model. [SEP]", 'score': 0.028643041849136353, 'token': 11838, 'token_str': 'great'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased') model = BertModel.from_pretrained("bert-base-multilingual-uncased") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import BertTokenizer, TFBertModel tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased') model = TFBertModel.from_pretrained("bert-base-multilingual-uncased") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='bert-base-multilingual-uncased') >>> unmasker("The man worked as a [MASK].") [{'sequence': '[CLS] the man worked as a teacher. [SEP]', 'score': 0.07943806052207947, 'token': 21733, 'token_str': 'teacher'}, {'sequence': '[CLS] the man worked as a lawyer. [SEP]', 'score': 0.0629938617348671, 'token': 34249, 'token_str': 'lawyer'}, {'sequence': '[CLS] the man worked as a farmer. [SEP]', 'score': 0.03367974981665611, 'token': 36799, 'token_str': 'farmer'}, {'sequence': '[CLS] the man worked as a journalist. [SEP]', 'score': 0.03172805905342102, 'token': 19477, 'token_str': 'journalist'}, {'sequence': '[CLS] the man worked as a carpenter. [SEP]', 'score': 0.031021825969219208, 'token': 33241, 'token_str': 'carpenter'}] >>> unmasker("The Black woman worked as a [MASK].") [{'sequence': '[CLS] the black woman worked as a nurse. [SEP]', 'score': 0.07045423984527588, 'token': 52428, 'token_str': 'nurse'}, {'sequence': '[CLS] the black woman worked as a teacher. [SEP]', 'score': 0.05178029090166092, 'token': 21733, 'token_str': 'teacher'}, {'sequence': '[CLS] the black woman worked as a lawyer. [SEP]', 'score': 0.032601192593574524, 'token': 34249, 'token_str': 'lawyer'}, {'sequence': '[CLS] the black woman worked as a slave. [SEP]', 'score': 0.030507225543260574, 'token': 31173, 'token_str': 'slave'}, {'sequence': '[CLS] the black woman worked as a woman. [SEP]', 'score': 0.027691684663295746, 'token': 14050, 'token_str': 'woman'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The BERT model was pretrained on the 102 languages with the largest Wikipedias. You can find the complete list [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). ## Training procedure ### Preprocessing The texts are lowercased and tokenized using WordPiece and a shared vocabulary size of 110,000. 
The languages with a larger Wikipedia are under-sampled and the ones with lower resources are oversampled. For languages like Chinese, Japanese Kanji and Korean Hanja that don't have spaces, a CJK Unicode block is added around every character.

The inputs of the model are then of the form:

```
[CLS] Sentence A [SEP] Sentence B [SEP]
```

With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus and in the other cases, it's another random sentence in the corpus. Note that what is considered a sentence here is a consecutive span of text usually longer than a single sentence. The only constraint is that the result with the two "sentences" has a combined length of less than 512 tokens.

The details of the masking procedure for each sentence are the following:
- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `[MASK]`.
- In 10% of the cases, the masked tokens are replaced by a random token (different from the one they replace).
- In the 10% remaining cases, the masked tokens are left as is.

A minimal illustrative sketch of this masking rule is shown after the citation below.

### BibTeX entry and citation info

```bibtex
@article{DBLP:journals/corr/abs-1810-04805,
  author    = {Jacob Devlin and
               Ming{-}Wei Chang and
               Kenton Lee and
               Kristina Toutanova},
  title     = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language
               Understanding},
  journal   = {CoRR},
  volume    = {abs/1810.04805},
  year      = {2018},
  url       = {http://arxiv.org/abs/1810.04805},
  archivePrefix = {arXiv},
  eprint    = {1810.04805},
  timestamp = {Tue, 30 Oct 2018 20:39:56 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1810-04805.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
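As referenced above, here is a simplified, illustrative sketch of the 80/10/10 masking rule. It is a reimplementation for clarity, not the original BERT preprocessing code; the function name and the `-100` "ignore" label convention are assumptions:

```python
import random

def mask_tokens(token_ids, mask_token_id, vocab_size, mlm_probability=0.15):
    """Sketch of BERT's masking rule: 15% of tokens are selected,
    of which 80% become [MASK], 10% become a random token, 10% stay unchanged."""
    masked = list(token_ids)
    labels = [-100] * len(token_ids)  # -100 marks positions that are not used in the MLM loss
    for i, token_id in enumerate(token_ids):
        if random.random() < mlm_probability:
            labels[i] = token_id           # the model must predict the original token here
            roll = random.random()
            if roll < 0.8:
                masked[i] = mask_token_id  # 80% of cases: replace with [MASK]
            elif roll < 0.9:
                masked[i] = random.randrange(vocab_size)  # 10% of cases: random token
            # else: remaining 10% of cases keep the original token
    return masked, labels
```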
sentence-transformers/distilbert-base-nli-stsb-mean-tokens
sentence-transformers
"2024-03-27T10:18:52Z"
515,465
11
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "safetensors", "distilbert", "feature-extraction", "sentence-similarity", "transformers", "arxiv:1908.10084", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
---
license: apache-2.0
library_name: sentence-transformers
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
pipeline_tag: sentence-similarity
---

**⚠️ This model is deprecated. Please don't use it as it produces sentence embeddings of low quality. You can find recommended sentence embedding models here: [SBERT.net - Pretrained Models](https://www.sbert.net/docs/pretrained_models.html)**

# sentence-transformers/distilbert-base-nli-stsb-mean-tokens

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('sentence-transformers/distilbert-base-nli-stsb-mean-tokens')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/distilbert-base-nli-stsb-mean-tokens')
model = AutoModel.from_pretrained('sentence-transformers/distilbert-base-nli-stsb-mean-tokens')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/distilbert-base-nli-stsb-mean-tokens)

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

This model was trained by [sentence-transformers](https://www.sbert.net/).
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084):

```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "http://arxiv.org/abs/1908.10084",
}
```
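As a small usage note (not from the original card, and bearing in mind the deprecation notice above), the embeddings produced by this model can be compared with cosine similarity for semantic search or clustering. A minimal sketch, assuming a recent sentence-transformers release that provides `util.cos_sim`:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/distilbert-base-nli-stsb-mean-tokens')

query = "How do I bake bread?"
corpus = [
    "Recipe for baking a simple loaf of bread",
    "Weather forecast for tomorrow",
    "Instructions for kneading dough",
]

query_emb = model.encode(query, convert_to_tensor=True)
corpus_emb = model.encode(corpus, convert_to_tensor=True)

# Cosine similarity between the query and every corpus sentence.
scores = util.cos_sim(query_emb, corpus_emb)[0]
for sentence, score in zip(corpus, scores):
    print(f"{score:.3f}\t{sentence}")
```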
google-bert/bert-large-cased
google-bert
"2024-02-19T11:06:20Z"
515,316
19
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:04Z"
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # BERT large model (cased) Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com/google-research/bert). This model is cased: it makes a difference between english and English. Disclaimer: The team releasing BERT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description BERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Next sentence prediction (NSP): the models concatenates two masked sentences as inputs during pretraining. Sometimes they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to predict if the two sentences were following each other or not. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the BERT model as inputs. This model has the following configuration: - 24-layer - 1024 hidden dimension - 16 attention heads - 336M parameters. ## Intended uses & limitations You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=bert) to look for fine-tuned versions on a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at model like GPT2. ### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='bert-large-cased') >>> unmasker("Hello I'm a [MASK] model.") [ { "sequence":"[CLS] Hello I'm a male model. [SEP]", "score":0.22748498618602753, "token":2581, "token_str":"male" }, { "sequence":"[CLS] Hello I'm a fashion model. [SEP]", "score":0.09146175533533096, "token":4633, "token_str":"fashion" }, { "sequence":"[CLS] Hello I'm a new model. [SEP]", "score":0.05823173746466637, "token":1207, "token_str":"new" }, { "sequence":"[CLS] Hello I'm a super model. [SEP]", "score":0.04488750174641609, "token":7688, "token_str":"super" }, { "sequence":"[CLS] Hello I'm a famous model. 
[SEP]", "score":0.03271442651748657, "token":2505, "token_str":"famous" } ] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('bert-large-cased') model = BertModel.from_pretrained("bert-large-cased") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import BertTokenizer, TFBertModel tokenizer = BertTokenizer.from_pretrained('bert-large-cased') model = TFBertModel.from_pretrained("bert-large-cased") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='bert-large-cased') >>> unmasker("The man worked as a [MASK].") [ { "sequence":"[CLS] The man worked as a doctor. [SEP]", "score":0.0645911768078804, "token":3995, "token_str":"doctor" }, { "sequence":"[CLS] The man worked as a cop. [SEP]", "score":0.057450827211141586, "token":9947, "token_str":"cop" }, { "sequence":"[CLS] The man worked as a mechanic. [SEP]", "score":0.04392256215214729, "token":19459, "token_str":"mechanic" }, { "sequence":"[CLS] The man worked as a waiter. [SEP]", "score":0.03755280375480652, "token":17989, "token_str":"waiter" }, { "sequence":"[CLS] The man worked as a teacher. [SEP]", "score":0.03458863124251366, "token":3218, "token_str":"teacher" } ] >>> unmasker("The woman worked as a [MASK].") [ { "sequence":"[CLS] The woman worked as a nurse. [SEP]", "score":0.2572779953479767, "token":7439, "token_str":"nurse" }, { "sequence":"[CLS] The woman worked as a waitress. [SEP]", "score":0.16706500947475433, "token":15098, "token_str":"waitress" }, { "sequence":"[CLS] The woman worked as a teacher. [SEP]", "score":0.04587847739458084, "token":3218, "token_str":"teacher" }, { "sequence":"[CLS] The woman worked as a secretary. [SEP]", "score":0.03577028587460518, "token":4848, "token_str":"secretary" }, { "sequence":"[CLS] The woman worked as a maid. [SEP]", "score":0.03298963978886604, "token":13487, "token_str":"maid" } ] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The BERT model was pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and headers). ## Training procedure ### Preprocessing The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence A [SEP] Sentence B [SEP] ``` With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus and in the other cases, it's another random sentence in the corpus. Note that what is considered a sentence here is a consecutive span of text usually longer than a single sentence. The only constrain is that the result with the two "sentences" has a combined length of less than 512 tokens. The details of the masking procedure for each sentence are the following: - 15% of the tokens are masked. - In 80% of the cases, the masked tokens are replaced by `[MASK]`. 
- In 10% of the cases, the masked tokens are replaced by a random token (different from the one they replace).
- In the 10% remaining cases, the masked tokens are left as is.

### Pretraining

The model was trained on 4 cloud TPUs in Pod configuration (16 TPU chips total) for one million steps with a batch size of 256. The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after.

## Evaluation results

When fine-tuned on downstream tasks, this model achieves the following results:

Model | SQUAD 1.1 F1/EM | Multi NLI Accuracy
---------------------------------------- | :-------------: | :----------------:
BERT-Large, Cased (Original) | 91.5/84.8 | 86.09

### BibTeX entry and citation info

```bibtex
@article{DBLP:journals/corr/abs-1810-04805,
  author    = {Jacob Devlin and
               Ming{-}Wei Chang and
               Kenton Lee and
               Kristina Toutanova},
  title     = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language
               Understanding},
  journal   = {CoRR},
  volume    = {abs/1810.04805},
  year      = {2018},
  url       = {http://arxiv.org/abs/1810.04805},
  archivePrefix = {arXiv},
  eprint    = {1810.04805},
  timestamp = {Tue, 30 Oct 2018 20:39:56 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1810-04805.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
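As a rough, hypothetical sketch of the kind of downstream fine-tuning these results come from (not code from the original card), a sequence-classification head can be attached and trained with the 🤗 Trainer. The dataset choice, output directory, and hyperparameters below are illustrative placeholders:

```python
from datasets import load_dataset
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          TrainingArguments, Trainer)

tokenizer = AutoTokenizer.from_pretrained("bert-large-cased")
model = AutoModelForSequenceClassification.from_pretrained("bert-large-cased", num_labels=2)

# Illustrative dataset choice; any labeled sentence dataset works the same way.
dataset = load_dataset("glue", "sst2")

def tokenize(batch):
    return tokenizer(batch["sentence"], truncation=True)

tokenized = dataset.map(tokenize, batched=True)

args = TrainingArguments(
    output_dir="bert-large-cased-sst2",  # placeholder output directory
    per_device_train_batch_size=16,
    learning_rate=2e-5,
    num_train_epochs=3,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["validation"],
    tokenizer=tokenizer,  # enables dynamic padding via the default data collator
)
trainer.train()
```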
cross-encoder/ms-marco-MiniLM-L-4-v2
cross-encoder
"2021-08-05T08:39:32Z"
509,998
1
transformers
[ "transformers", "pytorch", "jax", "bert", "text-classification", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
---
license: apache-2.0
---

# Cross-Encoder for MS Marco

This model was trained on the [MS Marco Passage Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking) task.

The model can be used for Information Retrieval: Given a query, encode the query with all possible passages (e.g. retrieved with ElasticSearch). Then sort the passages in decreasing order. See [SBERT.net Retrieve & Re-rank](https://www.sbert.net/examples/applications/retrieve_rerank/README.html) for more details. The training code is available here: [SBERT.net Training MS Marco](https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/ms_marco)

## Usage with Transformers

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model = AutoModelForSequenceClassification.from_pretrained('cross-encoder/ms-marco-MiniLM-L-4-v2')
tokenizer = AutoTokenizer.from_pretrained('cross-encoder/ms-marco-MiniLM-L-4-v2')

features = tokenizer(['How many people live in Berlin?', 'How many people live in Berlin?'], ['Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.', 'New York City is famous for the Metropolitan Museum of Art.'], padding=True, truncation=True, return_tensors="pt")

model.eval()
with torch.no_grad():
    scores = model(**features).logits
    print(scores)
```

## Usage with SentenceTransformers

The usage becomes easier when you have [SentenceTransformers](https://www.sbert.net/) installed. Then, you can use the pre-trained models like this:

```python
from sentence_transformers import CrossEncoder
model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-4-v2', max_length=512)
scores = model.predict([('Query', 'Paragraph1'), ('Query', 'Paragraph2'), ('Query', 'Paragraph3')])
```

## Performance

In the following table, we provide various pre-trained Cross-Encoders together with their performance on the [TREC Deep Learning 2019](https://microsoft.github.io/TREC-2019-Deep-Learning/) and the [MS Marco Passage Reranking](https://github.com/microsoft/MSMARCO-Passage-Ranking/) dataset.

| Model-Name | NDCG@10 (TREC DL 19) | MRR@10 (MS Marco Dev) | Docs / Sec |
| ------------- |:-------------| -----| --- |
| **Version 2 models** | | |
| cross-encoder/ms-marco-TinyBERT-L-2-v2 | 69.84 | 32.56 | 9000
| cross-encoder/ms-marco-MiniLM-L-2-v2 | 71.01 | 34.85 | 4100
| cross-encoder/ms-marco-MiniLM-L-4-v2 | 73.04 | 37.70 | 2500
| cross-encoder/ms-marco-MiniLM-L-6-v2 | 74.30 | 39.01 | 1800
| cross-encoder/ms-marco-MiniLM-L-12-v2 | 74.31 | 39.02 | 960
| **Version 1 models** | | |
| cross-encoder/ms-marco-TinyBERT-L-2 | 67.43 | 30.15 | 9000
| cross-encoder/ms-marco-TinyBERT-L-4 | 68.09 | 34.50 | 2900
| cross-encoder/ms-marco-TinyBERT-L-6 | 69.57 | 36.13 | 680
| cross-encoder/ms-marco-electra-base | 71.99 | 36.41 | 340
| **Other models** | | |
| nboost/pt-tinybert-msmarco | 63.63 | 28.80 | 2900
| nboost/pt-bert-base-uncased-msmarco | 70.94 | 34.75 | 340
| nboost/pt-bert-large-msmarco | 73.36 | 36.48 | 100
| Capreolus/electra-base-msmarco | 71.23 | 36.89 | 340
| amberoad/bert-multilingual-passage-reranking-msmarco | 68.40 | 35.54 | 330
| sebastian-hofstaetter/distilbert-cat-margin_mse-T2-msmarco | 72.82 | 37.88 | 720

Note: Runtime was computed on a V100 GPU.
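To make the re-ranking step explicit, the predicted scores can be used to sort retrieved passages in decreasing order. This small sketch extends the SentenceTransformers example above and is not part of the original card; the query and passages are illustrative:

```python
from sentence_transformers import CrossEncoder

model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-4-v2', max_length=512)

query = "How many people live in Berlin?"
# In practice these passages would come from a first-stage retriever such as Elasticsearch.
passages = [
    "Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.",
    "New York City is famous for the Metropolitan Museum of Art.",
    "Berlin is the capital of Germany.",
]

scores = model.predict([(query, passage) for passage in passages])

# Sort passages by score, highest (most relevant) first.
ranked = sorted(zip(scores, passages), key=lambda x: x[0], reverse=True)
for score, passage in ranked:
    print(f"{score:.2f}\t{passage}")
```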
facebook/mms-300m
facebook
"2023-06-05T10:23:32Z"
506,566
23
transformers
[ "transformers", "pytorch", "wav2vec2", "pretraining", "mms", "ab", "af", "ak", "am", "ar", "as", "av", "ay", "az", "ba", "bm", "be", "bn", "bi", "bo", "sh", "br", "bg", "ca", "cs", "ce", "cv", "ku", "cy", "da", "de", "dv", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fa", "fj", "fi", "fr", "fy", "ff", "ga", "gl", "gn", "gu", "zh", "ht", "ha", "he", "hi", "hu", "hy", "ig", "ia", "ms", "is", "it", "jv", "ja", "kn", "ka", "kk", "kr", "km", "ki", "rw", "ky", "ko", "kv", "lo", "la", "lv", "ln", "lt", "lb", "lg", "mh", "ml", "mr", "mk", "mg", "mt", "mn", "mi", "my", "nl", "no", "ne", "ny", "oc", "om", "or", "os", "pa", "pl", "pt", "ps", "qu", "ro", "rn", "ru", "sg", "sk", "sl", "sm", "sn", "sd", "so", "es", "sq", "su", "sv", "sw", "ta", "tt", "te", "tg", "tl", "th", "ti", "ts", "tr", "uk", "vi", "wo", "xh", "yo", "zu", "za", "dataset:google/fleurs", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "has_space", "region:us" ]
null
"2023-05-22T19:38:01Z"
--- tags: - mms language: - ab - af - ak - am - ar - as - av - ay - az - ba - bm - be - bn - bi - bo - sh - br - bg - ca - cs - ce - cv - ku - cy - da - de - dv - dz - el - en - eo - et - eu - ee - fo - fa - fj - fi - fr - fy - ff - ga - gl - gn - gu - zh - ht - ha - he - hi - sh - hu - hy - ig - ia - ms - is - it - jv - ja - kn - ka - kk - kr - km - ki - rw - ky - ko - kv - lo - la - lv - ln - lt - lb - lg - mh - ml - mr - ms - mk - mg - mt - mn - mi - my - zh - nl - 'no' - 'no' - ne - ny - oc - om - or - os - pa - pl - pt - ms - ps - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - ro - rn - ru - sg - sk - sl - sm - sn - sd - so - es - sq - su - sv - sw - ta - tt - te - tg - tl - th - ti - ts - tr - uk - ms - vi - wo - xh - ms - yo - ms - zu - za license: cc-by-nc-4.0 datasets: - google/fleurs metrics: - wer --- # Massively Multilingual Speech (MMS) - 300m Facebook's MMS counting *300m* parameters. MMS is Facebook AI's massive multilingual pretrained model for speech ("MMS"). It is pretrained in with [Wav2Vec2's self-supervised training objective](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) on about 500,000 hours of speech data in over 1,400 languages. When using the model make sure that your speech input is sampled at 16kHz. **Note**: This model should be fine-tuned on a downstream task, like Automatic Speech Recognition, Translation, or Classification. Check out the [**How-to-fine section](#how-to-finetune) or [**this blog**](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) for more information about ASR. ## Table Of Content - [How to Finetune](#how-to-finetune) - [Model details](#model-details) - [Additional links](#additional-links) ## How to finetune Coming soon... ## Model details - **Developed by:** Vineel Pratap et al. - **Model type:** Multi-Lingual Automatic Speech Recognition model - **Language(s):** 1000+ languages - **License:** CC-BY-NC 4.0 license - **Num parameters**: 300 million - **Cite as:** @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ## Additional Links - [Blog post]( ) - [Transformers documentation](https://huggingface.co/docs/transformers/main/en/model_doc/mms). - [Paper](https://arxiv.org/abs/2305.13516) - [GitHub Repository](https://github.com/facebookresearch/fairseq/tree/main/examples/mms#asr) - [Other **MMS** checkpoints](https://huggingface.co/models?other=mms) - MMS ASR fine-tuned checkpoints: - [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) - [facebook/mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107) - [facebook/mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102) - [Official Space](https://huggingface.co/spaces/facebook/MMS)
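Until the fine-tuning guide above is published, a minimal sketch of loading the pretrained checkpoint for feature extraction looks like the following. It assumes the checkpoint ships the standard Wav2Vec2 feature extractor in 🤗 Transformers and that input audio is sampled at 16 kHz; the random waveform is only a placeholder for real speech:

```python
import torch
from transformers import AutoFeatureExtractor, Wav2Vec2Model

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/mms-300m")
model = Wav2Vec2Model.from_pretrained("facebook/mms-300m")

# Placeholder: one second of 16 kHz audio; replace with a real waveform.
waveform = torch.randn(16000).numpy()

inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state

# Shape: (batch, frames, hidden_size); these representations are what a downstream
# head (e.g. a CTC layer for ASR) would be fine-tuned on.
print(hidden_states.shape)
```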
madhurjindal/autonlp-Gibberish-Detector-492513457
madhurjindal
"2024-02-21T15:22:52Z"
506,410
33
transformers
[ "transformers", "pytorch", "onnx", "safetensors", "distilbert", "text-classification", "autonlp", "en", "dataset:madhurjindal/autonlp-data-Gibberish-Detector", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- tags: [autonlp] language: en widget: - text: "I love Machine Learning!" datasets: - madhurjindal/autonlp-data-Gibberish-Detector co2_eq_emissions: 5.527544460835904 --- # Problem Description The ability to process and understand user input is crucial for various applications, such as chatbots or downstream tasks. However, a common challenge faced in such systems is the presence of gibberish or nonsensical input. To address this problem, we present a project focused on developing a gibberish detector for the English language. The primary goal of this project is to classify user input as either **gibberish** or **non-gibberish**, enabling more accurate and meaningful interactions with the system. We also aim to enhance the overall performance and user experience of chatbots and other systems that rely on user input. >## What is Gibberish? Gibberish refers to **nonsensical or meaningless language or text** that lacks coherence or any discernible meaning. It can be characterized by a combination of random words, nonsensical phrases, grammatical errors, or syntactical abnormalities that prevent the communication from conveying a clear and understandable message. Gibberish can vary in intensity, ranging from simple noise with no meaningful words to sentences that may appear superficially correct but lack coherence or logical structure when examined closely. Detecting and identifying gibberish is essential in various contexts, such as **natural language processing**, **chatbot systems**, **spam filtering**, and **language-based security measures**, to ensure effective communication and accurate processing of user inputs. ## Label Description Thus, we break down the problem into 4 categories: 1. **Noise:** Gibberish at the zero level where even the different constituents of the input phrase (words) do not hold any meaning independently. *For example: `dfdfer fgerfow2e0d qsqskdsd djksdnfkff swq.`* 2. **Word Salad:** Gibberish at level 1 where words make sense independently, but when looked at the bigger picture (the phrase) any meaning is not depicted. *For example: `22 madhur old punjab pickle chennai`* 3. **Mild gibberish:** Gibberish at level 2 where there is a part of the sentence that has grammatical errors, word sense errors, or any syntactical abnormalities, which leads the sentence to miss out on a coherent meaning. *For example: `Madhur study in a teacher`* 4. **Clean:** This category represents a set of words that form a complete and meaningful sentence on its own. *For example: `I love this website`* > **Tip:** To facilitate gibberish detection, you can combine the labels based on the desired level of detection. For instance, if you need to detect gibberish at level 1, you can group Noise and Word Salad together as "Gibberish," while considering Mild gibberish and Clean separately as "NotGibberish." This approach allows for flexibility in detecting and categorizing different levels of gibberish based on specific requirements. 
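To illustrate the tip above, here is a small sketch (not part of the original card) that collapses the four categories into a level-1 binary gibberish decision. The exact label strings returned by the model are assumed to match the category names above after lower-casing; check `model.config.id2label` for the authoritative names:

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="madhurjindal/autonlp-Gibberish-Detector-492513457")

# Level-1 grouping: noise and word salad count as gibberish, the rest does not.
# Assumption: label strings match the category names in this card.
LEVEL_1_GIBBERISH = {"noise", "word salad"}

def is_gibberish(text: str) -> bool:
    label = classifier(text)[0]["label"].lower()
    return label in LEVEL_1_GIBBERISH

print(is_gibberish("dfdfer fgerfow2e0d qsqskdsd djksdnfkff swq."))  # expected: True
print(is_gibberish("I love this website"))                          # expected: False
```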
# Model Trained Using AutoNLP

- Problem type: Multi-class Classification
- Model ID: 492513457
- CO2 Emissions (in grams): 5.527544460835904

## Validation Metrics

- Loss: 0.07609463483095169
- Accuracy: 0.9735624586913417
- Macro F1: 0.9736173135739408
- Micro F1: 0.9735624586913417
- Weighted F1: 0.9736173135739408
- Macro Precision: 0.9737771415197378
- Micro Precision: 0.9735624586913417
- Weighted Precision: 0.9737771415197378
- Macro Recall: 0.9735624586913417
- Micro Recall: 0.9735624586913417
- Weighted Recall: 0.9735624586913417

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love Machine Learning!"}' https://api-inference.huggingface.co/models/madhurjindal/autonlp-Gibberish-Detector-492513457
```

Or Python API:

```python
import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("madhurjindal/autonlp-Gibberish-Detector-492513457", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("madhurjindal/autonlp-Gibberish-Detector-492513457", use_auth_token=True)

inputs = tokenizer("I love Machine Learning!", return_tensors="pt")
outputs = model(**inputs)

probs = F.softmax(outputs.logits, dim=-1)
predicted_index = torch.argmax(probs, dim=1).item()
predicted_prob = probs[0][predicted_index].item()
labels = model.config.id2label
predicted_label = labels[predicted_index]

for i, prob in enumerate(probs[0]):
    print(f"Class: {labels[i]}, Probability: {prob:.4f}")
```

Another simplified solution with the transformers pipeline:

```python
from transformers import pipeline

selected_model = "madhurjindal/autonlp-Gibberish-Detector-492513457"
classifier = pipeline("text-classification", model=selected_model)
classifier("I love Machine Learning!")
```
playgroundai/playground-v2.5-1024px-aesthetic
playgroundai
"2024-03-15T00:00:20Z"
495,894
492
diffusers
[ "diffusers", "safetensors", "text-to-image", "playground", "arxiv:2206.00364", "arxiv:2402.17245", "license:other", "endpoints_compatible", "has_space", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-02-16T18:46:17Z"
--- license: other license_name: playground-v2dot5-community license_link: https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md tags: - text-to-image - playground inference: parameters: guidance_scale: 3.0 --- # Playground v2.5 – 1024px Aesthetic Model This repository contains a model that generates highly aesthetic images of resolution 1024x1024, as well as portrait and landscape aspect ratios. You can use the model with Hugging Face 🧨 Diffusers. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/636c0c4eaae2da3c76b8a9a3/HYUUGfU6SOCHsvyeISQ5Y.png) **Playground v2.5** is a diffusion-based text-to-image generative model, and a successor to [Playground v2](https://huggingface.co/playgroundai/playground-v2-1024px-aesthetic). Playground v2.5 is the state-of-the-art open-source model in aesthetic quality. Our user studies demonstrate that our model outperforms SDXL, Playground v2, PixArt-α, DALL-E 3, and Midjourney 5.2. For details on the development and training of our model, please refer to our [blog post](https://blog.playgroundai.com/playground-v2-5/) and [technical report](https://marketing-cdn.playground.com/research/pgv2.5_compressed.pdf). ### Model Description - **Developed by:** [Playground](https://playground.com) - **Model type:** Diffusion-based text-to-image generative model - **License:** [Playground v2.5 Community License](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md) - **Summary:** This model generates images based on text prompts. It is a Latent Diffusion Model that uses two fixed, pre-trained text encoders (OpenCLIP-ViT/G and CLIP-ViT/L). It follows the same architecture as [Stable Diffusion XL](https://huggingface.co/docs/diffusers/en/using-diffusers/sdxl). ### Using the model with 🧨 Diffusers Install diffusers >= 0.27.0 and the relevant dependencies. ``` pip install diffusers>=0.27.0 pip install transformers accelerate safetensors ``` **Notes:** - The pipeline uses the `EDMDPMSolverMultistepScheduler` scheduler by default, for crisper fine details. It's an [EDM formulation](https://arxiv.org/abs/2206.00364) of the DPM++ 2M Karras scheduler. `guidance_scale=3.0` is a good default for this scheduler. - The pipeline also supports the `EDMEulerScheduler` scheduler. It's an [EDM formulation](https://arxiv.org/abs/2206.00364) of the Euler scheduler. `guidance_scale=5.0` is a good default for this scheduler. Then, run the following snippet: ```python from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "playgroundai/playground-v2.5-1024px-aesthetic", torch_dtype=torch.float16, variant="fp16", ).to("cuda") # # Optional: Use DPM++ 2M Karras scheduler for crisper fine details # from diffusers import EDMDPMSolverMultistepScheduler # pipe.scheduler = EDMDPMSolverMultistepScheduler() prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=3).images[0] ``` ### Using the model with Automatic1111/ComfyUI Support coming soon. We will update this model card with instructions when ready. ### User Studies This model card only provides a brief summary of our user study results. For extensive details on how we perform user studies, please check out our [technical report](https://marketing-cdn.playground.com/research/pgv2.5_compressed.pdf). 
We conducted studies to measure overall aesthetic quality, as well as for the specific areas we aimed to improve with Playground v2.5, namely multi aspect ratios and human preference alignment. #### Comparison to State-of-the-Art ![image/png](https://cdn-uploads.huggingface.co/production/uploads/63855d851769b7c4b10e1f76/V7LFNzgoQJnL__ndU0CnE.png) The aesthetic quality of Playground v2.5 dramatically outperforms the current state-of-the-art open source models SDXL and PIXART-α, as well as Playground v2. Because the performance differential between Playground V2.5 and SDXL was so large, we also tested our aesthetic quality against world-class closed-source models like DALL-E 3 and Midjourney 5.2, and found that Playground v2.5 outperforms them as well. #### Multi Aspect Ratios ![image/png](https://cdn-uploads.huggingface.co/production/uploads/636c0c4eaae2da3c76b8a9a3/xMB0r-CmR3N6dABFlcV71.png) Similarly, for multi aspect ratios, we outperform SDXL by a large margin. #### Human Preference Alignment on People-related images ![image/png](https://cdn-uploads.huggingface.co/production/uploads/636c0c4eaae2da3c76b8a9a3/7c-8Stw52OsNtUjse8Slv.png) Next, we benchmark Playground v2.5 specifically on people-related images, to test Human Preference Alignment. We compared Playground v2.5 against two commonly-used baseline models: SDXL and RealStock v2, a community fine-tune of SDXL that was trained on a realistic people dataset. Playground v2.5 outperforms both baselines by a large margin. ### MJHQ-30K Benchmark ![image/png](https://cdn-uploads.huggingface.co/production/uploads/636c0c4eaae2da3c76b8a9a3/7tyYDPGUtokh-k18XDSte.png) | Model | Overall FID | | ------------------------------------- | ----- | | SDXL-1-0-refiner | 9.55 | | [playground-v2-1024px-aesthetic](https://huggingface.co/playgroundai/playground-v2-1024px-aesthetic) | 7.07 | | [playground-v2.5-1024px-aesthetic](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic) | **4.48** | Lastly, we report metrics using our MJHQ-30K benchmark which we [open-sourced](https://huggingface.co/datasets/playgroundai/MJHQ-30K) with the v2 release. We report both the overall FID and per category FID. All FID metrics are computed at resolution 1024x1024. Our results show that Playground v2.5 outperforms both Playground v2 and SDXL in overall FID and all category FIDs, especially in the people and fashion categories. This is in line with the results of the user study, which indicates a correlation between human preferences and the FID score of the MJHQ-30K benchmark. ### How to cite us ``` @misc{li2024playground, title={Playground v2.5: Three Insights towards Enhancing Aesthetic Quality in Text-to-Image Generation}, author={Daiqing Li and Aleks Kamko and Ehsan Akhgari and Ali Sabet and Linmiao Xu and Suhail Doshi}, year={2024}, eprint={2402.17245}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
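As noted in the usage section above, the pipeline also supports the `EDMEulerScheduler` with `guidance_scale=5.0`. A minimal sketch of switching to it, mirroring the commented-out scheduler swap in the card's own snippet and assuming diffusers >= 0.27:

```python
from diffusers import DiffusionPipeline, EDMEulerScheduler
import torch

pipe = DiffusionPipeline.from_pretrained(
    "playgroundai/playground-v2.5-1024px-aesthetic",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Swap in the EDM Euler scheduler; guidance_scale=5.0 is the suggested default for it.
pipe.scheduler = EDMEulerScheduler()

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=5.0).images[0]
```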
paust/pko-t5-large
paust
"2023-06-28T17:03:42Z"
494,996
17
transformers
[ "transformers", "pytorch", "safetensors", "t5", "text2text-generation", "ko", "arxiv:2105.09680", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-05-16T11:59:52Z"
---
language: ko
license: cc-by-4.0
---

# pko-t5-large

[Source Code](https://github.com/paust-team/pko-t5)

pko-t5 is a [T5 v1.1 model](https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/released_checkpoints.md) trained exclusively on Korean data. To tokenize Korean it uses BBPE, which has no OOV problem, instead of sentencepiece, and it was trained with unsupervised learning only, applying T5's span corruption task to Korean data (Namuwiki, Wikipedia, the Modu Corpus, etc.). When using pko-t5, please fine-tune it on your target task.

## Usage

The model is accessible through the transformers API. When using the tokenizer, please use `T5TokenizerFast`, not `T5Tokenizer`. The model itself can be used with `T5ForConditionalGeneration` as is.

### Example

```python
from transformers import T5TokenizerFast, T5ForConditionalGeneration

tokenizer = T5TokenizerFast.from_pretrained('paust/pko-t5-large')
model = T5ForConditionalGeneration.from_pretrained('paust/pko-t5-large')

input_ids = tokenizer(["qa question: 당신의 이름은 무엇인가요?"], return_tensors="pt").input_ids
labels = tokenizer(["T5 입니다."], return_tensors="pt").input_ids
outputs = model(input_ids=input_ids, labels=labels)

print(f"loss={outputs.loss} logits={outputs.logits}")
```

## KLUE evaluation (dev)

| | Model | ynat (macro F1) | sts (pearsonr/F1) | nli (acc) | ner (entity-level F1) | re (micro F1) | dp (LAS) | mrc (EM/F1) |
|-----|------------------------------------------------------------------|-----------------|-------------------|-----------|-----------------------|---------------|-----------|-------------|
| | Baseline | **87.30** | **93.20/86.13** | **89.50** | 86.06 | 71.06 | 87.93 | **75.26/-** |
| FT | [pko-t5-small](https://huggingface.co/paust/pko-t5-small) (77M) | 86.21 | 77.99/77.01 | 69.20 | 82.60 | 66.46 | 93.15 | 43.81/46.58 |
| FT | [pko-t5-base](https://huggingface.co/paust/pko-t5-base) (250M) | 87.29 | 90.25/83.43 | 79.73 | 87.80 | 67.23 | 97.28 | 61.53/64.74 |
| FT | [pko-t5-large](https://huggingface.co/paust/pko-t5-large) (800M) | 87.12 | 92.05/85.24 | 84.96 | **88.18** | **75.17** | **97.60** | 68.01/71.44 |
| MT | pko-t5-small | 84.54 | 68.50/72.02 | 51.16 | 74.69 | 66.11 | 80.40 | 43.60/46.28 |
| MT | pko-t5-base | 86.89 | 83.96/80.30 | 72.03 | 85.27 | 66.59 | 95.05 | 61.11/63.94 |
| MT | pko-t5-large | 87.57 | 91.93/86.29 | 83.63 | 87.41 | 71.34 | 96.99 | 70.70/73.72 |

- FT: single-task fine-tuning / MT: multi-task fine-tuning
- [Baseline](https://arxiv.org/abs/2105.09680): SOTA scores on the dev set reported in the KLUE paper

## License

pko-t5, created by [PAUST](https://paust.io), is released under the [MIT license](https://github.com/paust-team/pko-t5/blob/main/LICENSE).
myshell-ai/MeloTTS-English
myshell-ai
"2024-03-01T17:34:55Z"
494,974
19
transformers
[ "transformers", "text-to-speech", "ko", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
text-to-speech
"2024-02-29T14:52:43Z"
--- license: mit language: - ko pipeline_tag: text-to-speech --- # MeloTTS MeloTTS is a **high-quality multi-lingual** text-to-speech library by [MyShell.ai](https://myshell.ai). Supported languages include: | Model card | Example | | --- | --- | | [English](https://huggingface.co/myshell-ai/MeloTTS-English-v2) (American) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/en/EN-US/speed_1.0/sent_000.wav) | | [English](https://huggingface.co/myshell-ai/MeloTTS-English-v2) (British) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/en/EN-BR/speed_1.0/sent_000.wav) | | [English](https://huggingface.co/myshell-ai/MeloTTS-English-v2) (Indian) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/en/EN_INDIA/speed_1.0/sent_000.wav) | | [English](https://huggingface.co/myshell-ai/MeloTTS-English-v2) (Australian) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/en/EN-AU/speed_1.0/sent_000.wav) | | [English](https://huggingface.co/myshell-ai/MeloTTS-English-v2) (Default) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/en/EN-Default/speed_1.0/sent_000.wav) | | [Spanish](https://huggingface.co/myshell-ai/MeloTTS-Spanish) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/es/ES/speed_1.0/sent_000.wav) | | [French](https://huggingface.co/myshell-ai/MeloTTS-French) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/fr/FR/speed_1.0/sent_000.wav) | | [Chinese](https://huggingface.co/myshell-ai/MeloTTS-Chinese) (mix EN) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/zh/ZH/speed_1.0/sent_008.wav) | | [Japanese](https://huggingface.co/myshell-ai/MeloTTS-Japanese) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/jp/JP/speed_1.0/sent_000.wav) | | [Korean](https://huggingface.co/myshell-ai/MeloTTS-Korean/) | [Link](https://myshell-public-repo-hosting.s3.amazonaws.com/myshellttsbase/examples/kr/KR/speed_1.0/sent_000.wav) | Some other features include: - The Chinese speaker supports `mixed Chinese and English`. - Fast enough for `CPU real-time inference`. ## Usage ### Without Installation An unofficial [live demo](https://huggingface.co/spaces/mrfakename/MeloTTS) is hosted on Hugging Face Spaces. #### Use it on MyShell There are hundreds of TTS models on MyShell, much more than MeloTTS. See examples [here](https://github.com/myshell-ai/MeloTTS/blob/main/docs/quick_use.md#use-melotts-without-installation). More can be found at the widget center of [MyShell.ai](https://app.myshell.ai/robot-workshop). ### Install and Use Locally Follow the installation steps [here](https://github.com/myshell-ai/MeloTTS/blob/main/docs/install.md#linux-and-macos-install) before using the following snippet: ```python from melo.api import TTS # Speed is adjustable speed = 1.0 # CPU is sufficient for real-time inference. # You can set it manually to 'cpu' or 'cuda' or 'cuda:0' or 'mps' device = 'auto' # Will automatically use GPU if available # English text = "Did you ever hear a folk tale about a giant turtle?" 
model = TTS(language='EN', device=device)
speaker_ids = model.hps.data.spk2id

# American accent
output_path = 'en-us.wav'
model.tts_to_file(text, speaker_ids['EN-US'], output_path, speed=speed)

# British accent
output_path = 'en-br.wav'
model.tts_to_file(text, speaker_ids['EN-BR'], output_path, speed=speed)

# Indian accent
output_path = 'en-india.wav'
model.tts_to_file(text, speaker_ids['EN_INDIA'], output_path, speed=speed)

# Australian accent
output_path = 'en-au.wav'
model.tts_to_file(text, speaker_ids['EN-AU'], output_path, speed=speed)

# Default accent
output_path = 'en-default.wav'
model.tts_to_file(text, speaker_ids['EN-Default'], output_path, speed=speed)
```

A loop-based variant of this snippet is sketched at the end of this card.

## Join the Community

**Open Source AI Grant**

We are actively sponsoring open-source AI projects. The sponsorship includes GPU resources, funding and intellectual support (collaboration with top research labs). We welcome both research and engineering projects, as long as the open-source community needs them. Please contact [Zengyi Qin](https://www.qinzy.tech/) if you are interested.

**Contributing**

If you find this work useful, please consider contributing to the GitHub [repo](https://github.com/myshell-ai/MeloTTS).

- Many thanks to [@fakerybakery](https://github.com/fakerybakery) for adding the Web UI and CLI part.

## License

This library is under MIT License, which means it is free for both commercial and non-commercial use.

## Acknowledgements

This implementation is based on [TTS](https://github.com/coqui-ai/TTS), [VITS](https://github.com/jaywalnut310/vits), [VITS2](https://github.com/daniilrobnikov/vits2) and [Bert-VITS2](https://github.com/fishaudio/Bert-VITS2). We appreciate their awesome work.
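As a compact variant of the usage snippet above, the per-accent calls can be folded into a loop over the speaker keys shown there. This is a sketch under the assumption that the same `melo` installation is available and that indexing `speaker_ids` with these keys behaves exactly as in the per-accent calls above.

```python
from melo.api import TTS

# Loop-based variant of the example above; the accent keys and output
# file names mirror the ones demonstrated in this card.
speed = 1.0
text = "Did you ever hear a folk tale about a giant turtle?"

model = TTS(language='EN', device='auto')
speaker_ids = model.hps.data.spk2id

for accent in ['EN-US', 'EN-BR', 'EN_INDIA', 'EN-AU', 'EN-Default']:
    output_path = f"{accent.lower().replace('_', '-')}.wav"
    model.tts_to_file(text, speaker_ids[accent], output_path, speed=speed)
```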
openai/whisper-small
openai
"2024-02-29T10:57:38Z"
493,790
148
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "whisper", "automatic-speech-recognition", "audio", "hf-asr-leaderboard", "en", "zh", "de", "es", "ru", "ko", "fr", "ja", "pt", "tr", "pl", "ca", "nl", "ar", "sv", "it", "id", "hi", "fi", "vi", "he", "uk", "el", "ms", "cs", "ro", "da", "hu", "ta", "no", "th", "ur", "hr", "bg", "lt", "la", "mi", "ml", "cy", "sk", "te", "fa", "lv", "bn", "sr", "az", "sl", "kn", "et", "mk", "br", "eu", "is", "hy", "ne", "mn", "bs", "kk", "sq", "sw", "gl", "mr", "pa", "si", "km", "sn", "yo", "so", "af", "oc", "ka", "be", "tg", "sd", "gu", "am", "yi", "lo", "uz", "fo", "ht", "ps", "tk", "nn", "mt", "sa", "lb", "my", "bo", "tl", "mg", "as", "tt", "haw", "ln", "ha", "ba", "jw", "su", "arxiv:2212.04356", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
"2022-09-26T06:51:27Z"
--- language: - en - zh - de - es - ru - ko - fr - ja - pt - tr - pl - ca - nl - ar - sv - it - id - hi - fi - vi - he - uk - el - ms - cs - ro - da - hu - ta - no - th - ur - hr - bg - lt - la - mi - ml - cy - sk - te - fa - lv - bn - sr - az - sl - kn - et - mk - br - eu - is - hy - ne - mn - bs - kk - sq - sw - gl - mr - pa - si - km - sn - yo - so - af - oc - ka - be - tg - sd - gu - am - yi - lo - uz - fo - ht - ps - tk - nn - mt - sa - lb - my - bo - tl - mg - as - tt - haw - ln - ha - ba - jw - su tags: - audio - automatic-speech-recognition - hf-asr-leaderboard widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac model-index: - name: whisper-small results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (clean) type: librispeech_asr config: clean split: test args: language: en metrics: - name: Test WER type: wer value: 3.432213777886737 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (other) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: 7.628304527060248 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: hi split: test args: language: hi metrics: - name: Test WER type: wer value: 87.3 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 13.0 type: mozilla-foundation/common_voice_13_0 config: dv split: test args: language: dv metrics: - name: Wer type: wer value: 125.69809089960707 pipeline_tag: automatic-speech-recognition license: apache-2.0 --- # Whisper Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains **without** the need for fine-tuning. Whisper was proposed in the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://arxiv.org/abs/2212.04356) by Alec Radford et al from OpenAI. The original code repository can be found [here](https://github.com/openai/whisper). **Disclaimer**: Content for this model card has partly been written by the Hugging Face team, and parts of it were copied and pasted from the original model card. ## Model details Whisper is a Transformer based encoder-decoder model, also referred to as a _sequence-to-sequence_ model. It was trained on 680k hours of labelled speech data annotated using large-scale weak supervision. The models were trained on either English-only data or multilingual data. The English-only models were trained on the task of speech recognition. The multilingual models were trained on both speech recognition and speech translation. For speech recognition, the model predicts transcriptions in the *same* language as the audio. For speech translation, the model predicts transcriptions to a *different* language to the audio. Whisper checkpoints come in five configurations of varying model sizes. The smallest four are trained on either English-only or multilingual data. The largest checkpoints are multilingual only. All ten of the pre-trained checkpoints are available on the [Hugging Face Hub](https://huggingface.co/models?search=openai/whisper). 
The checkpoints are summarised in the following table with links to the models on the Hub:

| Size | Parameters | English-only | Multilingual |
|----------|------------|------------------------------------------------------|-----------------------------------------------------|
| tiny | 39 M | [✓](https://huggingface.co/openai/whisper-tiny.en) | [✓](https://huggingface.co/openai/whisper-tiny) |
| base | 74 M | [✓](https://huggingface.co/openai/whisper-base.en) | [✓](https://huggingface.co/openai/whisper-base) |
| small | 244 M | [✓](https://huggingface.co/openai/whisper-small.en) | [✓](https://huggingface.co/openai/whisper-small) |
| medium | 769 M | [✓](https://huggingface.co/openai/whisper-medium.en) | [✓](https://huggingface.co/openai/whisper-medium) |
| large | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large) |
| large-v2 | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large-v2) |

# Usage

To transcribe audio samples, the model has to be used alongside a [`WhisperProcessor`](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperProcessor).

The `WhisperProcessor` is used to:
1. Pre-process the audio inputs (converting them to log-Mel spectrograms for the model)
2. Post-process the model outputs (converting them from tokens to text)

The model is informed of which task to perform (transcription or translation) by passing the appropriate "context tokens". These context tokens are a sequence of tokens that are given to the decoder at the start of the decoding process, and take the following order:
1. The transcription always starts with the `<|startoftranscript|>` token
2. The second token is the language token (e.g. `<|en|>` for English)
3. The third token is the "task token". It can take one of two values: `<|transcribe|>` for speech recognition or `<|translate|>` for speech translation
4. In addition, a `<|notimestamps|>` token is added if the model should not include timestamp prediction

Thus, a typical sequence of context tokens might look as follows:
```
<|startoftranscript|> <|en|> <|transcribe|> <|notimestamps|>
```
Which tells the model to decode in English, under the task of speech recognition, and not to predict timestamps.

These tokens can either be forced or un-forced. If they are forced, the model is made to predict each token at each position. This allows one to control the output language and task for the Whisper model. If they are un-forced, the Whisper model will automatically predict the output language and task itself.

The context tokens can be set accordingly, using the `processor` instance loaded as in the examples below:

```python
model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="english", task="transcribe")
```

Which forces the model to predict in English under the task of speech recognition.

## Transcription

### English to English

In this example, the context tokens are 'unforced', meaning the model automatically predicts the output language (English) and task (transcribe).
```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-small") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small") >>> model.config.forced_decoder_ids = None >>> # load dummy dataset and read audio files >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=False) ['<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'] ``` The context tokens can be removed from the start of the transcription by setting `skip_special_tokens=True`. ### French to French The following example demonstrates French to French transcription by setting the decoder ids appropriately. ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import Audio, load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-small") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small") >>> forced_decoder_ids = processor.get_decoder_prompt_ids(language="french", task="transcribe") >>> # load streaming dataset and read first audio sample >>> ds = load_dataset("common_voice", "fr", split="test", streaming=True) >>> ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) >>> input_speech = next(iter(ds))["audio"] >>> input_features = processor(input_speech["array"], sampling_rate=input_speech["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids) ['<|startoftranscript|><|fr|><|transcribe|><|notimestamps|> Un vrai travail intéressant va enfin être mené sur ce sujet.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Un vrai travail intéressant va enfin être mené sur ce sujet.'] ``` ## Translation Setting the task to "translate" forces the Whisper model to perform speech translation. 
### French to English ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import Audio, load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-small") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small") >>> forced_decoder_ids = processor.get_decoder_prompt_ids(language="french", task="translate") >>> # load streaming dataset and read first audio sample >>> ds = load_dataset("common_voice", "fr", split="test", streaming=True) >>> ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) >>> input_speech = next(iter(ds))["audio"] >>> input_features = processor(input_speech["array"], sampling_rate=input_speech["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' A very interesting work, we will finally be given on this subject.'] ``` ## Evaluation This code snippet shows how to evaluate Whisper Small on [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr): ```python >>> from datasets import load_dataset >>> from transformers import WhisperForConditionalGeneration, WhisperProcessor >>> import torch >>> from evaluate import load >>> librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") >>> processor = WhisperProcessor.from_pretrained("openai/whisper-small") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to("cuda") >>> def map_to_pred(batch): >>> audio = batch["audio"] >>> input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features >>> batch["reference"] = processor.tokenizer._normalize(batch['text']) >>> >>> with torch.no_grad(): >>> predicted_ids = model.generate(input_features.to("cuda"))[0] >>> transcription = processor.decode(predicted_ids) >>> batch["prediction"] = processor.tokenizer._normalize(transcription) >>> return batch >>> result = librispeech_test_clean.map(map_to_pred) >>> wer = load("wer") >>> print(100 * wer.compute(references=result["reference"], predictions=result["prediction"])) 3.432213777886737 ``` ## Long-Form Transcription The Whisper model is intrinsically designed to work on audio samples of up to 30s in duration. However, by using a chunking algorithm, it can be used to transcribe audio samples of up to arbitrary length. This is possible through Transformers [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) method. Chunking is enabled by setting `chunk_length_s=30` when instantiating the pipeline. With chunking enabled, the pipeline can be run with batched inference. It can also be extended to predict sequence level timestamps by passing `return_timestamps=True`: ```python >>> import torch >>> from transformers import pipeline >>> from datasets import load_dataset >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> pipe = pipeline( >>> "automatic-speech-recognition", >>> model="openai/whisper-small", >>> chunk_length_s=30, >>> device=device, >>> ) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> prediction = pipe(sample.copy(), batch_size=8)["text"] " Mr. 
Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."

>>> # we can also return timestamps for the predictions
>>> prediction = pipe(sample.copy(), batch_size=8, return_timestamps=True)["chunks"]
[{'text': ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.', 'timestamp': (0.0, 5.44)}]
```

Refer to the blog post [ASR Chunking](https://huggingface.co/blog/asr-chunking) for more details on the chunking algorithm.

## Fine-Tuning

The pre-trained Whisper model demonstrates a strong ability to generalise to different datasets and domains. However, its predictive capabilities can be improved further for certain languages and tasks through *fine-tuning*. The blog post [Fine-Tune Whisper with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) provides a step-by-step guide to fine-tuning the Whisper model with as little as 5 hours of labelled data; a minimal loss-computation sketch is also included at the end of this card.

### Evaluated Use

The primary intended users of these models are AI researchers studying robustness, generalization, capabilities, biases, and constraints of the current model. However, Whisper is also potentially quite useful as an ASR solution for developers, especially for English speech recognition. We recognize that once models are released, it is impossible to restrict access to only “intended” uses or to draw reasonable guidelines around what is or is not research.

The models are primarily trained and evaluated on ASR and speech translation to English tasks. They show strong ASR results in ~10 languages. They may exhibit additional capabilities, particularly if fine-tuned on certain tasks like voice activity detection, speaker classification, or speaker diarization but have not been robustly evaluated in these areas. We strongly recommend that users perform robust evaluations of the models in a particular context and domain before deploying them.

In particular, we caution against using Whisper models to transcribe recordings of individuals taken without their consent or purporting to use these models for any kind of subjective classification. We recommend against use in high-risk domains like decision-making contexts, where flaws in accuracy can lead to pronounced flaws in outcomes. The models are intended to transcribe and translate speech; use of the model for classification is not only not evaluated but also not appropriate, particularly to infer human attributes.

## Training Data

The models are trained on 680,000 hours of audio and the corresponding transcripts collected from the internet. 65% of this data (or 438,000 hours) represents English-language audio and matched English transcripts, roughly 18% (or 126,000 hours) represents non-English audio and English transcripts, while the final 17% (or 117,000 hours) represents non-English audio and the corresponding transcript. This non-English data represents 98 different languages.

As discussed in [the accompanying paper](https://cdn.openai.com/papers/whisper.pdf), we see that performance on transcription in a given language is directly correlated with the amount of training data we employ in that language.

## Performance and Limitations

Our studies show that, over many existing ASR systems, the models exhibit improved robustness to accents, background noise, technical language, as well as zero-shot translation from multiple languages into English; and that accuracy on speech recognition and translation is near the state-of-the-art level.
However, because the models are trained in a weakly supervised manner using large-scale noisy data, the predictions may include texts that are not actually spoken in the audio input (i.e. hallucination). We hypothesize that this happens because, given their general knowledge of language, the models combine trying to predict the next word in audio with trying to transcribe the audio itself.

Our models perform unevenly across languages, and we observe lower accuracy on low-resource and/or low-discoverability languages or languages where we have less training data. The models also exhibit disparate performance on different accents and dialects of particular languages, which may include higher word error rate across speakers of different genders, races, ages, or other demographic criteria. Our full evaluation results are presented in [the paper accompanying this release](https://cdn.openai.com/papers/whisper.pdf).

In addition, the sequence-to-sequence architecture of the model makes it prone to generating repetitive texts, which can be mitigated to some degree by beam search and temperature scheduling but not perfectly. Further analysis on these limitations is provided in [the paper](https://cdn.openai.com/papers/whisper.pdf). It is likely that this behavior and hallucinations may be worse on lower-resource and/or lower-discoverability languages.

## Broader Implications

We anticipate that Whisper models’ transcription capabilities may be used for improving accessibility tools. While Whisper models cannot be used for real-time transcription out of the box, their speed and size suggest that others may be able to build applications on top of them that allow for near-real-time speech recognition and translation. The real value of beneficial applications built on top of Whisper models suggests that the disparate performance of these models may have real economic implications.

There are also potential dual use concerns that come with releasing Whisper. While we hope the technology will be used primarily for beneficial purposes, making ASR technology more accessible could enable more actors to build capable surveillance technologies or scale up existing surveillance efforts, as the speed and accuracy allow for affordable automatic transcription and translation of large volumes of audio communication. Moreover, these models may have some capabilities to recognize specific individuals out of the box, which in turn presents safety concerns related both to dual use and disparate performance. In practice, we expect that the cost of transcription is not the limiting factor of scaling up surveillance projects.

### BibTeX entry and citation info

```bibtex
@misc{radford2022whisper,
  doi = {10.48550/ARXIV.2212.04356},
  url = {https://arxiv.org/abs/2212.04356},
  author = {Radford, Alec and Kim, Jong Wook and Xu, Tao and Brockman, Greg and McLeavey, Christine and Sutskever, Ilya},
  title = {Robust Speech Recognition via Large-Scale Weak Supervision},
  publisher = {arXiv},
  year = {2022},
  copyright = {arXiv.org perpetual, non-exclusive license}
}
```
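As a companion to the Fine-Tuning section above, the sketch below shows only the core loss computation that fine-tuning is built around; it is a minimal illustration, not the full recipe from the linked blog post (which also covers data preparation, label padding, and the complete training loop). The dummy LibriSpeech sample is reused here purely for convenience.

```python
from datasets import load_dataset
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Minimal fine-tuning loss sketch: one (audio, transcript) pair from the
# dummy dataset used elsewhere in this card. A real setup would batch
# examples, pad the labels, and wrap this in an optimizer/training loop.
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]

# log-Mel spectrogram features for the encoder
input_features = processor(
    sample["audio"]["array"],
    sampling_rate=sample["audio"]["sampling_rate"],
    return_tensors="pt",
).input_features

# tokenized reference transcript as labels for the decoder
labels = processor.tokenizer(sample["text"], return_tensors="pt").input_ids

model.train()
outputs = model(input_features=input_features, labels=labels)
print(outputs.loss)  # cross-entropy loss you would backpropagate during fine-tuning
```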
jinaai/jina-embeddings-v2-small-en
jinaai
"2024-04-11T13:20:39Z"
491,720
103
sentence-transformers
[ "sentence-transformers", "pytorch", "coreml", "onnx", "safetensors", "bert", "feature-extraction", "sentence-similarity", "mteb", "custom_code", "en", "dataset:jinaai/negation-dataset", "arxiv:2108.12409", "arxiv:2310.19923", "license:apache-2.0", "model-index", "has_space", "region:us" ]
feature-extraction
"2023-09-27T20:17:27Z"
--- tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb datasets: - jinaai/negation-dataset language: en inference: false license: apache-2.0 model-index: - name: jina-embedding-s-en-v2 results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 71.35820895522387 - type: ap value: 33.99931933598115 - type: f1 value: 65.3853685535555 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 82.90140000000001 - type: ap value: 78.01434597815617 - type: f1 value: 82.83357802722676 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.88999999999999 - type: f1 value: 39.209432767163456 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 23.257 - type: map_at_10 value: 37.946000000000005 - type: map_at_100 value: 39.17 - type: map_at_1000 value: 39.181 - type: map_at_3 value: 32.99 - type: map_at_5 value: 35.467999999999996 - type: mrr_at_1 value: 23.541999999999998 - type: mrr_at_10 value: 38.057 - type: mrr_at_100 value: 39.289 - type: mrr_at_1000 value: 39.299 - type: mrr_at_3 value: 33.096 - type: mrr_at_5 value: 35.628 - type: ndcg_at_1 value: 23.257 - type: ndcg_at_10 value: 46.729 - type: ndcg_at_100 value: 51.900999999999996 - type: ndcg_at_1000 value: 52.16 - type: ndcg_at_3 value: 36.323 - type: ndcg_at_5 value: 40.766999999999996 - type: precision_at_1 value: 23.257 - type: precision_at_10 value: 7.510999999999999 - type: precision_at_100 value: 0.976 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 15.339 - type: precision_at_5 value: 11.350999999999999 - type: recall_at_1 value: 23.257 - type: recall_at_10 value: 75.107 - type: recall_at_100 value: 97.58200000000001 - type: recall_at_1000 value: 99.57300000000001 - type: recall_at_3 value: 46.017 - type: recall_at_5 value: 56.757000000000005 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 44.02420878391967 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 35.16136856000258 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 59.61809790513646 - type: mrr value: 73.07215406938397 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 82.0167350090749 - type: cos_sim_spearman value: 80.51569002630401 - type: euclidean_pearson value: 81.46820525099726 - type: euclidean_spearman value: 80.51569002630401 - type: manhattan_pearson value: 81.35596555056757 - type: manhattan_spearman value: 
80.12592210903303 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 78.25 - type: f1 value: 77.34950913540605 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 35.57238596005698 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 29.066444306196683 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 31.891000000000002 - type: map_at_10 value: 42.772 - type: map_at_100 value: 44.108999999999995 - type: map_at_1000 value: 44.236 - type: map_at_3 value: 39.289 - type: map_at_5 value: 41.113 - type: mrr_at_1 value: 39.342 - type: mrr_at_10 value: 48.852000000000004 - type: mrr_at_100 value: 49.534 - type: mrr_at_1000 value: 49.582 - type: mrr_at_3 value: 46.089999999999996 - type: mrr_at_5 value: 47.685 - type: ndcg_at_1 value: 39.342 - type: ndcg_at_10 value: 48.988 - type: ndcg_at_100 value: 53.854 - type: ndcg_at_1000 value: 55.955 - type: ndcg_at_3 value: 43.877 - type: ndcg_at_5 value: 46.027 - type: precision_at_1 value: 39.342 - type: precision_at_10 value: 9.285 - type: precision_at_100 value: 1.488 - type: precision_at_1000 value: 0.194 - type: precision_at_3 value: 20.696 - type: precision_at_5 value: 14.878 - type: recall_at_1 value: 31.891000000000002 - type: recall_at_10 value: 60.608 - type: recall_at_100 value: 81.025 - type: recall_at_1000 value: 94.883 - type: recall_at_3 value: 45.694 - type: recall_at_5 value: 51.684 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 28.778 - type: map_at_10 value: 37.632 - type: map_at_100 value: 38.800000000000004 - type: map_at_1000 value: 38.934999999999995 - type: map_at_3 value: 35.293 - type: map_at_5 value: 36.547000000000004 - type: mrr_at_1 value: 35.35 - type: mrr_at_10 value: 42.936 - type: mrr_at_100 value: 43.69 - type: mrr_at_1000 value: 43.739 - type: mrr_at_3 value: 41.062 - type: mrr_at_5 value: 42.097 - type: ndcg_at_1 value: 35.35 - type: ndcg_at_10 value: 42.528 - type: ndcg_at_100 value: 46.983000000000004 - type: ndcg_at_1000 value: 49.187999999999995 - type: ndcg_at_3 value: 39.271 - type: ndcg_at_5 value: 40.654 - type: precision_at_1 value: 35.35 - type: precision_at_10 value: 7.828 - type: precision_at_100 value: 1.3010000000000002 - type: precision_at_1000 value: 0.17700000000000002 - type: precision_at_3 value: 18.96 - type: precision_at_5 value: 13.120999999999999 - type: recall_at_1 value: 28.778 - type: recall_at_10 value: 50.775000000000006 - type: recall_at_100 value: 69.66799999999999 - type: recall_at_1000 value: 83.638 - type: recall_at_3 value: 40.757 - type: recall_at_5 value: 44.86 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 37.584 - type: map_at_10 value: 49.69 - type: map_at_100 value: 50.639 - type: map_at_1000 value: 50.702999999999996 - type: map_at_3 value: 46.61 - 
type: map_at_5 value: 48.486000000000004 - type: mrr_at_1 value: 43.009 - type: mrr_at_10 value: 52.949999999999996 - type: mrr_at_100 value: 53.618 - type: mrr_at_1000 value: 53.65299999999999 - type: mrr_at_3 value: 50.605999999999995 - type: mrr_at_5 value: 52.095 - type: ndcg_at_1 value: 43.009 - type: ndcg_at_10 value: 55.278000000000006 - type: ndcg_at_100 value: 59.134 - type: ndcg_at_1000 value: 60.528999999999996 - type: ndcg_at_3 value: 50.184 - type: ndcg_at_5 value: 52.919000000000004 - type: precision_at_1 value: 43.009 - type: precision_at_10 value: 8.821 - type: precision_at_100 value: 1.161 - type: precision_at_1000 value: 0.133 - type: precision_at_3 value: 22.424 - type: precision_at_5 value: 15.436 - type: recall_at_1 value: 37.584 - type: recall_at_10 value: 68.514 - type: recall_at_100 value: 85.099 - type: recall_at_1000 value: 95.123 - type: recall_at_3 value: 55.007 - type: recall_at_5 value: 61.714999999999996 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.7 - type: map_at_10 value: 32.804 - type: map_at_100 value: 33.738 - type: map_at_1000 value: 33.825 - type: map_at_3 value: 30.639 - type: map_at_5 value: 31.781 - type: mrr_at_1 value: 26.328000000000003 - type: mrr_at_10 value: 34.679 - type: mrr_at_100 value: 35.510000000000005 - type: mrr_at_1000 value: 35.577999999999996 - type: mrr_at_3 value: 32.58 - type: mrr_at_5 value: 33.687 - type: ndcg_at_1 value: 26.328000000000003 - type: ndcg_at_10 value: 37.313 - type: ndcg_at_100 value: 42.004000000000005 - type: ndcg_at_1000 value: 44.232 - type: ndcg_at_3 value: 33.076 - type: ndcg_at_5 value: 34.966 - type: precision_at_1 value: 26.328000000000003 - type: precision_at_10 value: 5.627 - type: precision_at_100 value: 0.8410000000000001 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 14.011000000000001 - type: precision_at_5 value: 9.582 - type: recall_at_1 value: 24.7 - type: recall_at_10 value: 49.324 - type: recall_at_100 value: 71.018 - type: recall_at_1000 value: 87.905 - type: recall_at_3 value: 37.7 - type: recall_at_5 value: 42.281 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 14.350999999999999 - type: map_at_10 value: 21.745 - type: map_at_100 value: 22.731 - type: map_at_1000 value: 22.852 - type: map_at_3 value: 19.245 - type: map_at_5 value: 20.788 - type: mrr_at_1 value: 18.159 - type: mrr_at_10 value: 25.833000000000002 - type: mrr_at_100 value: 26.728 - type: mrr_at_1000 value: 26.802 - type: mrr_at_3 value: 23.383000000000003 - type: mrr_at_5 value: 24.887999999999998 - type: ndcg_at_1 value: 18.159 - type: ndcg_at_10 value: 26.518000000000004 - type: ndcg_at_100 value: 31.473000000000003 - type: ndcg_at_1000 value: 34.576 - type: ndcg_at_3 value: 21.907 - type: ndcg_at_5 value: 24.39 - type: precision_at_1 value: 18.159 - type: precision_at_10 value: 4.938 - type: precision_at_100 value: 0.853 - type: precision_at_1000 value: 0.125 - type: precision_at_3 value: 10.655000000000001 - type: precision_at_5 value: 7.985 - type: recall_at_1 value: 14.350999999999999 - type: recall_at_10 value: 37.284 - type: recall_at_100 value: 59.11300000000001 - type: recall_at_1000 value: 81.634 - type: recall_at_3 value: 24.753 - type: recall_at_5 value: 30.979 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB 
CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 26.978 - type: map_at_10 value: 36.276 - type: map_at_100 value: 37.547000000000004 - type: map_at_1000 value: 37.678 - type: map_at_3 value: 33.674 - type: map_at_5 value: 35.119 - type: mrr_at_1 value: 32.916000000000004 - type: mrr_at_10 value: 41.798 - type: mrr_at_100 value: 42.72 - type: mrr_at_1000 value: 42.778 - type: mrr_at_3 value: 39.493 - type: mrr_at_5 value: 40.927 - type: ndcg_at_1 value: 32.916000000000004 - type: ndcg_at_10 value: 41.81 - type: ndcg_at_100 value: 47.284 - type: ndcg_at_1000 value: 49.702 - type: ndcg_at_3 value: 37.486999999999995 - type: ndcg_at_5 value: 39.597 - type: precision_at_1 value: 32.916000000000004 - type: precision_at_10 value: 7.411 - type: precision_at_100 value: 1.189 - type: precision_at_1000 value: 0.158 - type: precision_at_3 value: 17.581 - type: precision_at_5 value: 12.397 - type: recall_at_1 value: 26.978 - type: recall_at_10 value: 52.869 - type: recall_at_100 value: 75.78399999999999 - type: recall_at_1000 value: 91.545 - type: recall_at_3 value: 40.717 - type: recall_at_5 value: 46.168 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.641 - type: map_at_10 value: 32.916000000000004 - type: map_at_100 value: 34.165 - type: map_at_1000 value: 34.286 - type: map_at_3 value: 30.335 - type: map_at_5 value: 31.569000000000003 - type: mrr_at_1 value: 30.593999999999998 - type: mrr_at_10 value: 38.448 - type: mrr_at_100 value: 39.299 - type: mrr_at_1000 value: 39.362 - type: mrr_at_3 value: 36.244 - type: mrr_at_5 value: 37.232 - type: ndcg_at_1 value: 30.593999999999998 - type: ndcg_at_10 value: 38.2 - type: ndcg_at_100 value: 43.742 - type: ndcg_at_1000 value: 46.217000000000006 - type: ndcg_at_3 value: 33.925 - type: ndcg_at_5 value: 35.394 - type: precision_at_1 value: 30.593999999999998 - type: precision_at_10 value: 6.895 - type: precision_at_100 value: 1.1320000000000001 - type: precision_at_1000 value: 0.153 - type: precision_at_3 value: 16.096 - type: precision_at_5 value: 11.05 - type: recall_at_1 value: 24.641 - type: recall_at_10 value: 48.588 - type: recall_at_100 value: 72.841 - type: recall_at_1000 value: 89.535 - type: recall_at_3 value: 36.087 - type: recall_at_5 value: 40.346 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.79425 - type: map_at_10 value: 33.12033333333333 - type: map_at_100 value: 34.221333333333334 - type: map_at_1000 value: 34.3435 - type: map_at_3 value: 30.636583333333338 - type: map_at_5 value: 31.974083333333326 - type: mrr_at_1 value: 29.242416666666664 - type: mrr_at_10 value: 37.11675 - type: mrr_at_100 value: 37.93783333333334 - type: mrr_at_1000 value: 38.003083333333336 - type: mrr_at_3 value: 34.904666666666664 - type: mrr_at_5 value: 36.12916666666667 - type: ndcg_at_1 value: 29.242416666666664 - type: ndcg_at_10 value: 38.03416666666667 - type: ndcg_at_100 value: 42.86674999999999 - type: ndcg_at_1000 value: 45.34550000000001 - type: ndcg_at_3 value: 33.76466666666666 - type: ndcg_at_5 value: 35.668666666666674 - type: precision_at_1 value: 29.242416666666664 - type: precision_at_10 value: 6.589833333333334 - type: precision_at_100 value: 1.0693333333333332 - type: precision_at_1000 value: 0.14641666666666667 - type: precision_at_3 value: 
15.430749999999998 - type: precision_at_5 value: 10.833833333333333 - type: recall_at_1 value: 24.79425 - type: recall_at_10 value: 48.582916666666655 - type: recall_at_100 value: 69.88499999999999 - type: recall_at_1000 value: 87.211 - type: recall_at_3 value: 36.625499999999995 - type: recall_at_5 value: 41.553999999999995 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.767 - type: map_at_10 value: 28.450999999999997 - type: map_at_100 value: 29.332 - type: map_at_1000 value: 29.426000000000002 - type: map_at_3 value: 26.379 - type: map_at_5 value: 27.584999999999997 - type: mrr_at_1 value: 25.46 - type: mrr_at_10 value: 30.974 - type: mrr_at_100 value: 31.784000000000002 - type: mrr_at_1000 value: 31.857999999999997 - type: mrr_at_3 value: 28.962 - type: mrr_at_5 value: 30.066 - type: ndcg_at_1 value: 25.46 - type: ndcg_at_10 value: 32.041 - type: ndcg_at_100 value: 36.522 - type: ndcg_at_1000 value: 39.101 - type: ndcg_at_3 value: 28.152 - type: ndcg_at_5 value: 30.03 - type: precision_at_1 value: 25.46 - type: precision_at_10 value: 4.893 - type: precision_at_100 value: 0.77 - type: precision_at_1000 value: 0.107 - type: precision_at_3 value: 11.605 - type: precision_at_5 value: 8.19 - type: recall_at_1 value: 22.767 - type: recall_at_10 value: 40.71 - type: recall_at_100 value: 61.334999999999994 - type: recall_at_1000 value: 80.567 - type: recall_at_3 value: 30.198000000000004 - type: recall_at_5 value: 34.803 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 16.722 - type: map_at_10 value: 22.794 - type: map_at_100 value: 23.7 - type: map_at_1000 value: 23.822 - type: map_at_3 value: 20.781 - type: map_at_5 value: 22.024 - type: mrr_at_1 value: 20.061999999999998 - type: mrr_at_10 value: 26.346999999999998 - type: mrr_at_100 value: 27.153 - type: mrr_at_1000 value: 27.233 - type: mrr_at_3 value: 24.375 - type: mrr_at_5 value: 25.593 - type: ndcg_at_1 value: 20.061999999999998 - type: ndcg_at_10 value: 26.785999999999998 - type: ndcg_at_100 value: 31.319999999999997 - type: ndcg_at_1000 value: 34.346 - type: ndcg_at_3 value: 23.219 - type: ndcg_at_5 value: 25.107000000000003 - type: precision_at_1 value: 20.061999999999998 - type: precision_at_10 value: 4.78 - type: precision_at_100 value: 0.83 - type: precision_at_1000 value: 0.125 - type: precision_at_3 value: 10.874 - type: precision_at_5 value: 7.956 - type: recall_at_1 value: 16.722 - type: recall_at_10 value: 35.204 - type: recall_at_100 value: 55.797 - type: recall_at_1000 value: 77.689 - type: recall_at_3 value: 25.245 - type: recall_at_5 value: 30.115 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.842 - type: map_at_10 value: 32.917 - type: map_at_100 value: 33.961000000000006 - type: map_at_1000 value: 34.069 - type: map_at_3 value: 30.595 - type: map_at_5 value: 31.837 - type: mrr_at_1 value: 29.011 - type: mrr_at_10 value: 36.977 - type: mrr_at_100 value: 37.814 - type: mrr_at_1000 value: 37.885999999999996 - type: mrr_at_3 value: 34.966 - type: mrr_at_5 value: 36.043 - type: ndcg_at_1 value: 29.011 - type: ndcg_at_10 value: 37.735 - type: ndcg_at_100 value: 42.683 - type: ndcg_at_1000 value: 45.198 - type: ndcg_at_3 value: 33.650000000000006 - type: ndcg_at_5 
value: 35.386 - type: precision_at_1 value: 29.011 - type: precision_at_10 value: 6.259 - type: precision_at_100 value: 0.984 - type: precision_at_1000 value: 0.13 - type: precision_at_3 value: 15.329999999999998 - type: precision_at_5 value: 10.541 - type: recall_at_1 value: 24.842 - type: recall_at_10 value: 48.304 - type: recall_at_100 value: 70.04899999999999 - type: recall_at_1000 value: 87.82600000000001 - type: recall_at_3 value: 36.922 - type: recall_at_5 value: 41.449999999999996 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.252000000000002 - type: map_at_10 value: 32.293 - type: map_at_100 value: 33.816 - type: map_at_1000 value: 34.053 - type: map_at_3 value: 29.781999999999996 - type: map_at_5 value: 31.008000000000003 - type: mrr_at_1 value: 29.051 - type: mrr_at_10 value: 36.722 - type: mrr_at_100 value: 37.663000000000004 - type: mrr_at_1000 value: 37.734 - type: mrr_at_3 value: 34.354 - type: mrr_at_5 value: 35.609 - type: ndcg_at_1 value: 29.051 - type: ndcg_at_10 value: 37.775999999999996 - type: ndcg_at_100 value: 43.221 - type: ndcg_at_1000 value: 46.116 - type: ndcg_at_3 value: 33.403 - type: ndcg_at_5 value: 35.118 - type: precision_at_1 value: 29.051 - type: precision_at_10 value: 7.332 - type: precision_at_100 value: 1.49 - type: precision_at_1000 value: 0.23600000000000002 - type: precision_at_3 value: 15.415000000000001 - type: precision_at_5 value: 11.107 - type: recall_at_1 value: 24.252000000000002 - type: recall_at_10 value: 47.861 - type: recall_at_100 value: 72.21600000000001 - type: recall_at_1000 value: 90.886 - type: recall_at_3 value: 35.533 - type: recall_at_5 value: 39.959 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 20.025000000000002 - type: map_at_10 value: 27.154 - type: map_at_100 value: 28.118 - type: map_at_1000 value: 28.237000000000002 - type: map_at_3 value: 25.017 - type: map_at_5 value: 25.832 - type: mrr_at_1 value: 21.627 - type: mrr_at_10 value: 28.884999999999998 - type: mrr_at_100 value: 29.741 - type: mrr_at_1000 value: 29.831999999999997 - type: mrr_at_3 value: 26.741 - type: mrr_at_5 value: 27.628000000000004 - type: ndcg_at_1 value: 21.627 - type: ndcg_at_10 value: 31.436999999999998 - type: ndcg_at_100 value: 36.181000000000004 - type: ndcg_at_1000 value: 38.986 - type: ndcg_at_3 value: 27.025 - type: ndcg_at_5 value: 28.436 - type: precision_at_1 value: 21.627 - type: precision_at_10 value: 5.009 - type: precision_at_100 value: 0.7929999999999999 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 11.522 - type: precision_at_5 value: 7.763000000000001 - type: recall_at_1 value: 20.025000000000002 - type: recall_at_10 value: 42.954 - type: recall_at_100 value: 64.67500000000001 - type: recall_at_1000 value: 85.301 - type: recall_at_3 value: 30.892999999999997 - type: recall_at_5 value: 34.288000000000004 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 10.079 - type: map_at_10 value: 16.930999999999997 - type: map_at_100 value: 18.398999999999997 - type: map_at_1000 value: 18.561 - type: map_at_3 value: 14.294 - type: map_at_5 value: 15.579 - type: mrr_at_1 value: 22.606 - type: mrr_at_10 value: 32.513 - type: mrr_at_100 value: 33.463 - type: 
mrr_at_1000 value: 33.513999999999996 - type: mrr_at_3 value: 29.479 - type: mrr_at_5 value: 31.3 - type: ndcg_at_1 value: 22.606 - type: ndcg_at_10 value: 24.053 - type: ndcg_at_100 value: 30.258000000000003 - type: ndcg_at_1000 value: 33.516 - type: ndcg_at_3 value: 19.721 - type: ndcg_at_5 value: 21.144 - type: precision_at_1 value: 22.606 - type: precision_at_10 value: 7.55 - type: precision_at_100 value: 1.399 - type: precision_at_1000 value: 0.2 - type: precision_at_3 value: 14.701 - type: precision_at_5 value: 11.192 - type: recall_at_1 value: 10.079 - type: recall_at_10 value: 28.970000000000002 - type: recall_at_100 value: 50.805 - type: recall_at_1000 value: 69.378 - type: recall_at_3 value: 18.199 - type: recall_at_5 value: 22.442 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 7.794 - type: map_at_10 value: 15.165999999999999 - type: map_at_100 value: 20.508000000000003 - type: map_at_1000 value: 21.809 - type: map_at_3 value: 11.568000000000001 - type: map_at_5 value: 13.059000000000001 - type: mrr_at_1 value: 56.49999999999999 - type: mrr_at_10 value: 65.90899999999999 - type: mrr_at_100 value: 66.352 - type: mrr_at_1000 value: 66.369 - type: mrr_at_3 value: 64.0 - type: mrr_at_5 value: 65.10000000000001 - type: ndcg_at_1 value: 44.25 - type: ndcg_at_10 value: 32.649 - type: ndcg_at_100 value: 36.668 - type: ndcg_at_1000 value: 43.918 - type: ndcg_at_3 value: 37.096000000000004 - type: ndcg_at_5 value: 34.048 - type: precision_at_1 value: 56.49999999999999 - type: precision_at_10 value: 25.45 - type: precision_at_100 value: 8.055 - type: precision_at_1000 value: 1.7489999999999999 - type: precision_at_3 value: 41.0 - type: precision_at_5 value: 32.85 - type: recall_at_1 value: 7.794 - type: recall_at_10 value: 20.101 - type: recall_at_100 value: 42.448 - type: recall_at_1000 value: 65.88000000000001 - type: recall_at_3 value: 12.753 - type: recall_at_5 value: 15.307 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 44.01 - type: f1 value: 38.659680951114964 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 49.713 - type: map_at_10 value: 61.79 - type: map_at_100 value: 62.28 - type: map_at_1000 value: 62.297000000000004 - type: map_at_3 value: 59.361 - type: map_at_5 value: 60.92100000000001 - type: mrr_at_1 value: 53.405 - type: mrr_at_10 value: 65.79899999999999 - type: mrr_at_100 value: 66.219 - type: mrr_at_1000 value: 66.227 - type: mrr_at_3 value: 63.431000000000004 - type: mrr_at_5 value: 64.98 - type: ndcg_at_1 value: 53.405 - type: ndcg_at_10 value: 68.01899999999999 - type: ndcg_at_100 value: 70.197 - type: ndcg_at_1000 value: 70.571 - type: ndcg_at_3 value: 63.352 - type: ndcg_at_5 value: 66.018 - type: precision_at_1 value: 53.405 - type: precision_at_10 value: 9.119 - type: precision_at_100 value: 1.03 - type: precision_at_1000 value: 0.107 - type: precision_at_3 value: 25.602999999999998 - type: precision_at_5 value: 16.835 - type: recall_at_1 value: 49.713 - type: recall_at_10 value: 83.306 - type: recall_at_100 value: 92.92 - type: recall_at_1000 value: 95.577 - type: recall_at_3 value: 70.798 - type: recall_at_5 value: 77.254 - task: type: Retrieval dataset: type: fiqa name: MTEB FiQA2018 config: default split: test 
revision: None metrics: - type: map_at_1 value: 15.310000000000002 - type: map_at_10 value: 26.204 - type: map_at_100 value: 27.932000000000002 - type: map_at_1000 value: 28.121000000000002 - type: map_at_3 value: 22.481 - type: map_at_5 value: 24.678 - type: mrr_at_1 value: 29.784 - type: mrr_at_10 value: 39.582 - type: mrr_at_100 value: 40.52 - type: mrr_at_1000 value: 40.568 - type: mrr_at_3 value: 37.114000000000004 - type: mrr_at_5 value: 38.596000000000004 - type: ndcg_at_1 value: 29.784 - type: ndcg_at_10 value: 33.432 - type: ndcg_at_100 value: 40.281 - type: ndcg_at_1000 value: 43.653999999999996 - type: ndcg_at_3 value: 29.612 - type: ndcg_at_5 value: 31.223 - type: precision_at_1 value: 29.784 - type: precision_at_10 value: 9.645 - type: precision_at_100 value: 1.645 - type: precision_at_1000 value: 0.22499999999999998 - type: precision_at_3 value: 20.165 - type: precision_at_5 value: 15.401000000000002 - type: recall_at_1 value: 15.310000000000002 - type: recall_at_10 value: 40.499 - type: recall_at_100 value: 66.643 - type: recall_at_1000 value: 87.059 - type: recall_at_3 value: 27.492 - type: recall_at_5 value: 33.748 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 33.599000000000004 - type: map_at_10 value: 47.347 - type: map_at_100 value: 48.191 - type: map_at_1000 value: 48.263 - type: map_at_3 value: 44.698 - type: map_at_5 value: 46.278999999999996 - type: mrr_at_1 value: 67.19800000000001 - type: mrr_at_10 value: 74.054 - type: mrr_at_100 value: 74.376 - type: mrr_at_1000 value: 74.392 - type: mrr_at_3 value: 72.849 - type: mrr_at_5 value: 73.643 - type: ndcg_at_1 value: 67.19800000000001 - type: ndcg_at_10 value: 56.482 - type: ndcg_at_100 value: 59.694 - type: ndcg_at_1000 value: 61.204 - type: ndcg_at_3 value: 52.43299999999999 - type: ndcg_at_5 value: 54.608000000000004 - type: precision_at_1 value: 67.19800000000001 - type: precision_at_10 value: 11.613999999999999 - type: precision_at_100 value: 1.415 - type: precision_at_1000 value: 0.16199999999999998 - type: precision_at_3 value: 32.726 - type: precision_at_5 value: 21.349999999999998 - type: recall_at_1 value: 33.599000000000004 - type: recall_at_10 value: 58.069 - type: recall_at_100 value: 70.736 - type: recall_at_1000 value: 80.804 - type: recall_at_3 value: 49.088 - type: recall_at_5 value: 53.376000000000005 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 73.64359999999999 - type: ap value: 67.54685976014599 - type: f1 value: 73.55148707559482 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 19.502 - type: map_at_10 value: 30.816 - type: map_at_100 value: 32.007999999999996 - type: map_at_1000 value: 32.067 - type: map_at_3 value: 27.215 - type: map_at_5 value: 29.304000000000002 - type: mrr_at_1 value: 20.072000000000003 - type: mrr_at_10 value: 31.406 - type: mrr_at_100 value: 32.549 - type: mrr_at_1000 value: 32.602 - type: mrr_at_3 value: 27.839000000000002 - type: mrr_at_5 value: 29.926000000000002 - type: ndcg_at_1 value: 20.086000000000002 - type: ndcg_at_10 value: 37.282 - type: ndcg_at_100 value: 43.206 - type: ndcg_at_1000 value: 44.690000000000005 - type: ndcg_at_3 value: 29.932 - type: ndcg_at_5 value: 33.668 - type: precision_at_1 value: 20.086000000000002 - 
type: precision_at_10 value: 5.961 - type: precision_at_100 value: 0.898 - type: precision_at_1000 value: 0.10200000000000001 - type: precision_at_3 value: 12.856000000000002 - type: precision_at_5 value: 9.596 - type: recall_at_1 value: 19.502 - type: recall_at_10 value: 57.182 - type: recall_at_100 value: 84.952 - type: recall_at_1000 value: 96.34700000000001 - type: recall_at_3 value: 37.193 - type: recall_at_5 value: 46.157 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.96488828089375 - type: f1 value: 93.32119260543482 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 72.4965800273598 - type: f1 value: 49.34896217536082 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.60928043039678 - type: f1 value: 64.34244712074538 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.75453934095493 - type: f1 value: 68.39224867489249 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 31.862573504920082 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 27.511123551196803 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.99145104942086 - type: mrr value: 32.03606480418627 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.015 - type: map_at_10 value: 11.054 - type: map_at_100 value: 13.773 - type: map_at_1000 value: 15.082999999999998 - type: map_at_3 value: 8.253 - type: map_at_5 value: 9.508999999999999 - type: mrr_at_1 value: 42.105 - type: mrr_at_10 value: 50.44499999999999 - type: mrr_at_100 value: 51.080000000000005 - type: mrr_at_1000 value: 51.129999999999995 - type: mrr_at_3 value: 48.555 - type: mrr_at_5 value: 49.84 - type: ndcg_at_1 value: 40.402 - type: ndcg_at_10 value: 30.403000000000002 - type: ndcg_at_100 value: 28.216 - type: ndcg_at_1000 value: 37.021 - type: ndcg_at_3 value: 35.53 - type: ndcg_at_5 value: 33.202999999999996 - type: precision_at_1 value: 42.105 - type: precision_at_10 value: 22.353 - type: precision_at_100 value: 7.266 - type: precision_at_1000 value: 2.011 - type: precision_at_3 value: 32.921 - type: precision_at_5 value: 28.297 - type: recall_at_1 value: 5.015 - type: recall_at_10 value: 14.393 - type: recall_at_100 value: 28.893 - type: recall_at_1000 value: 60.18 - type: recall_at_3 value: 9.184000000000001 - type: recall_at_5 value: 11.39 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: None 
metrics: - type: map_at_1 value: 29.524 - type: map_at_10 value: 44.182 - type: map_at_100 value: 45.228 - type: map_at_1000 value: 45.265 - type: map_at_3 value: 39.978 - type: map_at_5 value: 42.482 - type: mrr_at_1 value: 33.256 - type: mrr_at_10 value: 46.661 - type: mrr_at_100 value: 47.47 - type: mrr_at_1000 value: 47.496 - type: mrr_at_3 value: 43.187999999999995 - type: mrr_at_5 value: 45.330999999999996 - type: ndcg_at_1 value: 33.227000000000004 - type: ndcg_at_10 value: 51.589 - type: ndcg_at_100 value: 56.043 - type: ndcg_at_1000 value: 56.937000000000005 - type: ndcg_at_3 value: 43.751 - type: ndcg_at_5 value: 47.937000000000005 - type: precision_at_1 value: 33.227000000000004 - type: precision_at_10 value: 8.556999999999999 - type: precision_at_100 value: 1.103 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_3 value: 19.921 - type: precision_at_5 value: 14.396999999999998 - type: recall_at_1 value: 29.524 - type: recall_at_10 value: 71.615 - type: recall_at_100 value: 91.056 - type: recall_at_1000 value: 97.72800000000001 - type: recall_at_3 value: 51.451 - type: recall_at_5 value: 61.119 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 69.596 - type: map_at_10 value: 83.281 - type: map_at_100 value: 83.952 - type: map_at_1000 value: 83.97200000000001 - type: map_at_3 value: 80.315 - type: map_at_5 value: 82.223 - type: mrr_at_1 value: 80.17 - type: mrr_at_10 value: 86.522 - type: mrr_at_100 value: 86.644 - type: mrr_at_1000 value: 86.64500000000001 - type: mrr_at_3 value: 85.438 - type: mrr_at_5 value: 86.21799999999999 - type: ndcg_at_1 value: 80.19 - type: ndcg_at_10 value: 87.19 - type: ndcg_at_100 value: 88.567 - type: ndcg_at_1000 value: 88.70400000000001 - type: ndcg_at_3 value: 84.17999999999999 - type: ndcg_at_5 value: 85.931 - type: precision_at_1 value: 80.19 - type: precision_at_10 value: 13.209000000000001 - type: precision_at_100 value: 1.518 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 36.717 - type: precision_at_5 value: 24.248 - type: recall_at_1 value: 69.596 - type: recall_at_10 value: 94.533 - type: recall_at_100 value: 99.322 - type: recall_at_1000 value: 99.965 - type: recall_at_3 value: 85.911 - type: recall_at_5 value: 90.809 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 49.27650627571912 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 57.08550946534183 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: None metrics: - type: map_at_1 value: 4.568 - type: map_at_10 value: 10.862 - type: map_at_100 value: 12.757 - type: map_at_1000 value: 13.031 - type: map_at_3 value: 7.960000000000001 - type: map_at_5 value: 9.337 - type: mrr_at_1 value: 22.5 - type: mrr_at_10 value: 32.6 - type: mrr_at_100 value: 33.603 - type: mrr_at_1000 value: 33.672000000000004 - type: mrr_at_3 value: 29.299999999999997 - type: mrr_at_5 value: 31.25 - type: ndcg_at_1 value: 22.5 - type: ndcg_at_10 value: 18.605 - type: ndcg_at_100 value: 26.029999999999998 - type: ndcg_at_1000 value: 31.256 - type: ndcg_at_3 value: 17.873 - type: ndcg_at_5 value: 15.511 - type: 
precision_at_1 value: 22.5 - type: precision_at_10 value: 9.58 - type: precision_at_100 value: 2.033 - type: precision_at_1000 value: 0.33 - type: precision_at_3 value: 16.633 - type: precision_at_5 value: 13.54 - type: recall_at_1 value: 4.568 - type: recall_at_10 value: 19.402 - type: recall_at_100 value: 41.277 - type: recall_at_1000 value: 66.963 - type: recall_at_3 value: 10.112 - type: recall_at_5 value: 13.712 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 83.31992291680787 - type: cos_sim_spearman value: 76.7212346922664 - type: euclidean_pearson value: 80.42189271706478 - type: euclidean_spearman value: 76.7212342532493 - type: manhattan_pearson value: 80.33171093031578 - type: manhattan_spearman value: 76.63192883074694 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 83.16654278886763 - type: cos_sim_spearman value: 73.66390263429565 - type: euclidean_pearson value: 79.7485360086639 - type: euclidean_spearman value: 73.66389870373436 - type: manhattan_pearson value: 79.73652237443706 - type: manhattan_spearman value: 73.65296117151647 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 82.40389689929246 - type: cos_sim_spearman value: 83.29727595993955 - type: euclidean_pearson value: 82.23970587854079 - type: euclidean_spearman value: 83.29727595993955 - type: manhattan_pearson value: 82.18823600831897 - type: manhattan_spearman value: 83.20746192209594 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 81.73505246913413 - type: cos_sim_spearman value: 79.1686548248754 - type: euclidean_pearson value: 80.48889135993412 - type: euclidean_spearman value: 79.16864112930354 - type: manhattan_pearson value: 80.40720651057302 - type: manhattan_spearman value: 79.0640155089286 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 86.3953512879065 - type: cos_sim_spearman value: 87.29947322714338 - type: euclidean_pearson value: 86.59759438529645 - type: euclidean_spearman value: 87.29947511092824 - type: manhattan_pearson value: 86.52097806169155 - type: manhattan_spearman value: 87.22987242146534 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 82.48565753792056 - type: cos_sim_spearman value: 83.6049720319893 - type: euclidean_pearson value: 82.56452023172913 - type: euclidean_spearman value: 83.60490168191697 - type: manhattan_pearson value: 82.58079941137872 - type: manhattan_spearman value: 83.60975807374051 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 88.18239976618212 - type: cos_sim_spearman value: 88.23061724730616 - type: euclidean_pearson value: 87.78482472776658 - type: euclidean_spearman value: 88.23061724730616 - type: 
manhattan_pearson value: 87.75059641730239 - type: manhattan_spearman value: 88.22527413524622 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 63.42816418706765 - type: cos_sim_spearman value: 63.4569864520124 - type: euclidean_pearson value: 64.35405409953853 - type: euclidean_spearman value: 63.4569864520124 - type: manhattan_pearson value: 63.96649236073056 - type: manhattan_spearman value: 63.01448583722708 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 83.41659638047614 - type: cos_sim_spearman value: 84.03893866106175 - type: euclidean_pearson value: 84.2251203953798 - type: euclidean_spearman value: 84.03893866106175 - type: manhattan_pearson value: 84.22733643205514 - type: manhattan_spearman value: 84.06504411263612 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.75608022582414 - type: mrr value: 94.0947732369301 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: None metrics: - type: map_at_1 value: 50.161 - type: map_at_10 value: 59.458999999999996 - type: map_at_100 value: 60.156 - type: map_at_1000 value: 60.194 - type: map_at_3 value: 56.45400000000001 - type: map_at_5 value: 58.165 - type: mrr_at_1 value: 53.333 - type: mrr_at_10 value: 61.050000000000004 - type: mrr_at_100 value: 61.586 - type: mrr_at_1000 value: 61.624 - type: mrr_at_3 value: 58.889 - type: mrr_at_5 value: 60.122 - type: ndcg_at_1 value: 53.333 - type: ndcg_at_10 value: 63.888999999999996 - type: ndcg_at_100 value: 66.963 - type: ndcg_at_1000 value: 68.062 - type: ndcg_at_3 value: 59.01 - type: ndcg_at_5 value: 61.373999999999995 - type: precision_at_1 value: 53.333 - type: precision_at_10 value: 8.633000000000001 - type: precision_at_100 value: 1.027 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 23.111 - type: precision_at_5 value: 15.467 - type: recall_at_1 value: 50.161 - type: recall_at_10 value: 75.922 - type: recall_at_100 value: 90.0 - type: recall_at_1000 value: 98.667 - type: recall_at_3 value: 62.90599999999999 - type: recall_at_5 value: 68.828 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.81188118811882 - type: cos_sim_ap value: 95.11619225962413 - type: cos_sim_f1 value: 90.35840484603736 - type: cos_sim_precision value: 91.23343527013252 - type: cos_sim_recall value: 89.5 - type: dot_accuracy value: 99.81188118811882 - type: dot_ap value: 95.11619225962413 - type: dot_f1 value: 90.35840484603736 - type: dot_precision value: 91.23343527013252 - type: dot_recall value: 89.5 - type: euclidean_accuracy value: 99.81188118811882 - type: euclidean_ap value: 95.11619225962413 - type: euclidean_f1 value: 90.35840484603736 - type: euclidean_precision value: 91.23343527013252 - type: euclidean_recall value: 89.5 - type: manhattan_accuracy value: 99.80891089108911 - type: manhattan_ap value: 95.07294266220966 - type: manhattan_f1 value: 90.21794221996959 - type: 
manhattan_precision value: 91.46968139773895 - type: manhattan_recall value: 89.0 - type: max_accuracy value: 99.81188118811882 - type: max_ap value: 95.11619225962413 - type: max_f1 value: 90.35840484603736 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 55.3481874105239 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 34.421291695525 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 49.98746633276634 - type: mrr value: 50.63143249724133 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.009961979844036 - type: cos_sim_spearman value: 30.558416108881044 - type: dot_pearson value: 31.009964941134253 - type: dot_spearman value: 30.545760761761393 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: None metrics: - type: map_at_1 value: 0.207 - type: map_at_10 value: 1.6 - type: map_at_100 value: 8.594 - type: map_at_1000 value: 20.213 - type: map_at_3 value: 0.585 - type: map_at_5 value: 0.9039999999999999 - type: mrr_at_1 value: 78.0 - type: mrr_at_10 value: 87.4 - type: mrr_at_100 value: 87.4 - type: mrr_at_1000 value: 87.4 - type: mrr_at_3 value: 86.667 - type: mrr_at_5 value: 87.06700000000001 - type: ndcg_at_1 value: 73.0 - type: ndcg_at_10 value: 65.18 - type: ndcg_at_100 value: 49.631 - type: ndcg_at_1000 value: 43.498999999999995 - type: ndcg_at_3 value: 71.83800000000001 - type: ndcg_at_5 value: 69.271 - type: precision_at_1 value: 78.0 - type: precision_at_10 value: 69.19999999999999 - type: precision_at_100 value: 50.980000000000004 - type: precision_at_1000 value: 19.426 - type: precision_at_3 value: 77.333 - type: precision_at_5 value: 74.0 - type: recall_at_1 value: 0.207 - type: recall_at_10 value: 1.822 - type: recall_at_100 value: 11.849 - type: recall_at_1000 value: 40.492 - type: recall_at_3 value: 0.622 - type: recall_at_5 value: 0.9809999999999999 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.001 - type: map_at_10 value: 10.376000000000001 - type: map_at_100 value: 16.936999999999998 - type: map_at_1000 value: 18.615000000000002 - type: map_at_3 value: 5.335999999999999 - type: map_at_5 value: 7.374 - type: mrr_at_1 value: 20.408 - type: mrr_at_10 value: 38.29 - type: mrr_at_100 value: 39.33 - type: mrr_at_1000 value: 39.347 - type: mrr_at_3 value: 32.993 - type: mrr_at_5 value: 36.973 - type: ndcg_at_1 value: 17.347 - type: ndcg_at_10 value: 23.515 - type: ndcg_at_100 value: 37.457 - type: ndcg_at_1000 value: 49.439 - type: ndcg_at_3 value: 22.762999999999998 - type: ndcg_at_5 value: 22.622 - type: precision_at_1 value: 20.408 - type: precision_at_10 value: 22.448999999999998 - type: precision_at_100 value: 8.184 - type: precision_at_1000 value: 1.608 - type: precision_at_3 value: 25.85 - type: precision_at_5 value: 25.306 - type: recall_at_1 value: 
2.001 - type: recall_at_10 value: 17.422 - type: recall_at_100 value: 51.532999999999994 - type: recall_at_1000 value: 87.466 - type: recall_at_3 value: 6.861000000000001 - type: recall_at_5 value: 10.502 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.54419999999999 - type: ap value: 14.372170450843907 - type: f1 value: 54.94420257390529 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.402942840973395 - type: f1 value: 59.4166538875571 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 41.569064336457906 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 85.31322644096085 - type: cos_sim_ap value: 72.14518894837381 - type: cos_sim_f1 value: 66.67489813557229 - type: cos_sim_precision value: 62.65954977953121 - type: cos_sim_recall value: 71.2401055408971 - type: dot_accuracy value: 85.31322644096085 - type: dot_ap value: 72.14521480685293 - type: dot_f1 value: 66.67489813557229 - type: dot_precision value: 62.65954977953121 - type: dot_recall value: 71.2401055408971 - type: euclidean_accuracy value: 85.31322644096085 - type: euclidean_ap value: 72.14520820485349 - type: euclidean_f1 value: 66.67489813557229 - type: euclidean_precision value: 62.65954977953121 - type: euclidean_recall value: 71.2401055408971 - type: manhattan_accuracy value: 85.21785778148656 - type: manhattan_ap value: 72.01177147657364 - type: manhattan_f1 value: 66.62594673833374 - type: manhattan_precision value: 62.0336669699727 - type: manhattan_recall value: 71.95250659630607 - type: max_accuracy value: 85.31322644096085 - type: max_ap value: 72.14521480685293 - type: max_f1 value: 66.67489813557229 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.12756626693057 - type: cos_sim_ap value: 86.05430786440826 - type: cos_sim_f1 value: 78.27759692216631 - type: cos_sim_precision value: 75.33466248931929 - type: cos_sim_recall value: 81.45980905451185 - type: dot_accuracy value: 89.12950673341872 - type: dot_ap value: 86.05431161145492 - type: dot_f1 value: 78.27759692216631 - type: dot_precision value: 75.33466248931929 - type: dot_recall value: 81.45980905451185 - type: euclidean_accuracy value: 89.12756626693057 - type: euclidean_ap value: 86.05431303247397 - type: euclidean_f1 value: 78.27759692216631 - type: euclidean_precision value: 75.33466248931929 - type: euclidean_recall value: 81.45980905451185 - type: manhattan_accuracy value: 89.04994760740482 - type: manhattan_ap value: 86.00860610892074 - type: manhattan_f1 value: 78.1846776005392 - type: manhattan_precision value: 76.10438839480975 - type: manhattan_recall value: 80.3818909762858 - type: max_accuracy value: 89.12950673341872 - type: max_ap 
value: 86.05431303247397 - type: max_f1 value: 78.27759692216631
---

<!-- TODO: add evaluation results here -->
<br><br>

<p align="center">
<img src="https://aeiljuispo.cloudimg.io/v7/https://cdn-uploads.huggingface.co/production/uploads/603763514de52ff951d89793/AFoybzd5lpBQXEBrQHuTt.png?w=200&h=200&f=face" alt="Finetuner logo: Finetuner helps you to create experiments in order to improve embeddings on search tasks. It accompanies you to deliver the last mile of performance-tuning for neural search applications." width="150px">
</p>

<p align="center">
<b>The text embedding set trained by <a href="https://jina.ai/"><b>Jina AI</b></a>.</b>
</p>

## Quick Start

The easiest way to start using `jina-embeddings-v2-small-en` is to use Jina AI's [Embedding API](https://jina.ai/embeddings/).

## Intended Usage & Model Info

`jina-embeddings-v2-small-en` is an English, monolingual **embedding model** supporting an **8192 sequence length**.
It is based on a BERT architecture (JinaBERT) that supports the symmetric bidirectional variant of [ALiBi](https://arxiv.org/abs/2108.12409) to allow longer sequence lengths.
The backbone `jina-bert-v2-small-en` is pretrained on the C4 dataset.
The model is further trained on Jina AI's collection of more than 400 million sentence pairs and hard negatives.
These pairs were obtained from various domains and were carefully selected through a thorough cleaning process.

The embedding model was trained using a 512 sequence length, but extrapolates to an 8k sequence length (or even longer) thanks to ALiBi.
This makes our model useful for a range of use cases, especially when processing long documents is needed, including long document retrieval, semantic textual similarity, text reranking, recommendation, RAG and LLM-based generative search, etc.

This model has 33 million parameters, which enables lightning-fast and memory-efficient inference, while still delivering impressive performance.

Additionally, we provide the following embedding models:

- [`jina-embeddings-v2-small-en`](https://huggingface.co/jinaai/jina-embeddings-v2-small-en): 33 million parameters **(you are here)**.
- [`jina-embeddings-v2-base-en`](https://huggingface.co/jinaai/jina-embeddings-v2-base-en): 137 million parameters.
- [`jina-embeddings-v2-base-zh`](https://huggingface.co/jinaai/jina-embeddings-v2-base-zh): 161 million parameters, Chinese-English bilingual embeddings.
- [`jina-embeddings-v2-base-de`](https://huggingface.co/jinaai/jina-embeddings-v2-base-de): 161 million parameters, German-English bilingual embeddings.
- [`jina-embeddings-v2-base-es`](): Spanish-English bilingual embeddings (soon).

## Data & Parameters

See the Jina Embeddings V2 [technical report](https://arxiv.org/abs/2310.19923).

## Usage

**<details><summary>Please apply mean pooling when integrating the model.</summary>**
<p>

### Why mean pooling?

Mean pooling takes all token embeddings from the model output and averages them at the sentence/paragraph level.
It has been proven to be the most effective way to produce high-quality sentence embeddings.
We offer an `encode` function to deal with this.
However, if you would like to do it without using the default `encode` function:

```python
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

def mean_pooling(model_output, attention_mask):
    # average the token embeddings, ignoring padded positions
    token_embeddings = model_output[0]
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

sentences = ['How is the weather today?', 'What is the current weather like today?']

tokenizer = AutoTokenizer.from_pretrained('jinaai/jina-embeddings-v2-small-en')
model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-small-en', trust_remote_code=True)

encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

with torch.no_grad():
    model_output = model(**encoded_input)

embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
embeddings = F.normalize(embeddings, p=2, dim=1)
```

</p>
</details>

You can use Jina Embedding models directly from the `transformers` package.

```python
!pip install transformers
from transformers import AutoModel
from numpy.linalg import norm

cos_sim = lambda a, b: (a @ b.T) / (norm(a) * norm(b))

model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-small-en', trust_remote_code=True)  # trust_remote_code is needed to use the encode method
embeddings = model.encode(['How is the weather today?', 'What is the current weather like today?'])
print(cos_sim(embeddings[0], embeddings[1]))
```

If you only want to handle shorter sequences, such as 2k, pass the `max_length` parameter to the `encode` function:

```python
embeddings = model.encode(
    ['Very long ... document'],
    max_length=2048
)
```

As of its latest release (v2.3.0), sentence-transformers also supports Jina embeddings (please make sure that you are logged into Hugging Face as well):

```python
!pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

model = SentenceTransformer(
    "jinaai/jina-embeddings-v2-small-en",  # switch to en/zh for English or Chinese
    trust_remote_code=True
)

# control your input sequence length up to 8192
model.max_seq_length = 1024

embeddings = model.encode([
    'How is the weather today?',
    'What is the current weather like today?'
])
print(cos_sim(embeddings[0], embeddings[1]))
```

## Alternatives to Using Transformers Package

1. _Managed SaaS_: Get started with a free key on Jina AI's [Embedding API](https://jina.ai/embeddings/).
2. _Private and high-performance deployment_: Get started by picking from our suite of models and deploy them on [AWS Sagemaker](https://aws.amazon.com/marketplace/seller-profile?id=seller-stch2ludm6vgy).

## RAG Performance

According to the latest blog post from [LLamaIndex](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83),

> In summary, to achieve the peak performance in both hit rate and MRR, the combination of OpenAI or JinaAI-Base embeddings with the CohereRerank/bge-reranker-large reranker stands out.

<img src="https://miro.medium.com/v2/resize:fit:4800/format:webp/1*ZP2RVejCZovF3FDCg-Bx3A.png" width="780px">

## Plans

1. Bilingual embedding models supporting more European & Asian languages, including Spanish, French, Italian and Japanese.
2. Multimodal embedding models to enable multimodal RAG applications.
3. High-performance rerankers.
## Troubleshooting

**Loading of Model Code failed**

If you forgot to pass the `trust_remote_code=True` flag when calling `AutoModel.from_pretrained` or initializing the model via the `SentenceTransformer` class, you will receive an error that the model weights could not be initialized.
This is caused by transformers falling back to creating a default BERT model, instead of a jina-embedding model:

```bash
Some weights of the model checkpoint at jinaai/jina-embeddings-v2-base-en were not used when initializing BertModel: ['encoder.layer.2.mlp.layernorm.weight', 'encoder.layer.3.mlp.layernorm.weight', 'encoder.layer.10.mlp.wo.bias', 'encoder.layer.5.mlp.wo.bias', 'encoder.layer.2.mlp.layernorm.bias', 'encoder.layer.1.mlp.gated_layers.weight', 'encoder.layer.5.mlp.gated_layers.weight', 'encoder.layer.8.mlp.layernorm.bias',
...
```

**User is not logged into Huggingface**

The model is only available under [gated access](https://huggingface.co/docs/hub/models-gated).
This means you need to be logged into Hugging Face to load it.
If you receive the following error, you need to provide an access token, either by using the huggingface-cli or providing the token via an environment variable as described above:

```bash
OSError: jinaai/jina-embeddings-v2-base-en is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'
If this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass `use_auth_token=True`.
```

## Contact

Join our [Discord community](https://discord.jina.ai) and chat with other community members about ideas.

## Citation

If you find Jina Embeddings useful in your research, please cite the following paper:

```
@misc{günther2023jina,
      title={Jina Embeddings 2: 8192-Token General-Purpose Text Embeddings for Long Documents},
      author={Michael Günther and Jackmin Ong and Isabelle Mohr and Alaeddine Abdessalem and Tanguy Abel and Mohammad Kalim Akram and Susana Guzman and Georgios Mastrapas and Saba Sturua and Bo Wang and Maximilian Werk and Nan Wang and Han Xiao},
      year={2023},
      eprint={2310.19923},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
ggml-org/models
ggml-org
"2024-04-05T16:08:58Z"
488,732
3
null
[ "gguf", "region:us" ]
null
"2023-12-18T17:40:16Z"
Note: this repo will be removed soon - do not use
lengyue233/content-vec-best
lengyue233
"2023-03-31T08:02:09Z"
483,835
7
transformers
[ "transformers", "pytorch", "hubert", "doi:10.57967/hf/0479", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
null
"2023-03-25T04:33:59Z"
---
license: mit
---

# Content Vec Best

Official Repo: [ContentVec](https://github.com/auspicious3000/contentvec)

This repo brings the fairseq ContentVec model to Hugging Face Transformers.

## How to use

To use this model, you need to define

```python
from torch import nn
from transformers import HubertModel

class HubertModelWithFinalProj(HubertModel):
    def __init__(self, config):
        super().__init__(config)

        # The final projection layer is only used for backward compatibility.
        # Following https://github.com/auspicious3000/contentvec/issues/6
        # Removing this layer is necessary to achieve the desired outcome.
        self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size)
```

and then load the model with

```python
model = HubertModelWithFinalProj.from_pretrained("lengyue233/content-vec-best")
x = model(audio)["last_hidden_state"]
```

## How to convert

You need to download the ContentVec_legacy model from the official repo, and then run

```bash
python convert.py
```
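For end-to-end feature extraction from an audio file, here is a minimal sketch under the assumption that the model expects a 16 kHz mono waveform; `torchaudio` and the file name `example.wav` are illustrative choices, not part of the original card:

```python
import torch
import torchaudio

model = HubertModelWithFinalProj.from_pretrained("lengyue233/content-vec-best").eval()

waveform, sample_rate = torchaudio.load("example.wav")   # [channels, samples]
waveform = waveform.mean(dim=0, keepdim=True)            # downmix to mono: [1, samples]
if sample_rate != 16000:
    waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)

with torch.no_grad():
    features = model(waveform)["last_hidden_state"]      # [1, frames, hidden_size]
print(features.shape)
```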
cross-encoder/ms-marco-TinyBERT-L-2-v2
cross-encoder
"2021-08-05T08:39:45Z"
476,431
14
transformers
[ "transformers", "pytorch", "jax", "bert", "text-classification", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
---
license: apache-2.0
---

# Cross-Encoder for MS Marco

This model was trained on the [MS Marco Passage Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking) task.

The model can be used for Information Retrieval: given a query, encode the query with all possible passages (e.g. retrieved with ElasticSearch). Then sort the passages in decreasing order. See [SBERT.net Retrieve & Re-rank](https://www.sbert.net/examples/applications/retrieve_rerank/README.html) for more details. The training code is available here: [SBERT.net Training MS Marco](https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/ms_marco)

## Usage with Transformers

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model = AutoModelForSequenceClassification.from_pretrained('model_name')
tokenizer = AutoTokenizer.from_pretrained('model_name')

features = tokenizer(['How many people live in Berlin?', 'How many people live in Berlin?'],
                     ['Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.', 'New York City is famous for the Metropolitan Museum of Art.'],
                     padding=True, truncation=True, return_tensors="pt")

model.eval()
with torch.no_grad():
    scores = model(**features).logits
    print(scores)
```

## Usage with SentenceTransformers

The usage becomes easier when you have [SentenceTransformers](https://www.sbert.net/) installed. Then, you can use the pre-trained models like this:

```python
from sentence_transformers import CrossEncoder

model = CrossEncoder('model_name', max_length=512)
scores = model.predict([('Query', 'Paragraph1'), ('Query', 'Paragraph2'), ('Query', 'Paragraph3')])
```

## Performance

In the following table, we provide various pre-trained Cross-Encoders together with their performance on the [TREC Deep Learning 2019](https://microsoft.github.io/TREC-2019-Deep-Learning/) and the [MS Marco Passage Reranking](https://github.com/microsoft/MSMARCO-Passage-Ranking/) dataset.

| Model-Name | NDCG@10 (TREC DL 19) | MRR@10 (MS Marco Dev) | Docs / Sec |
| ------------- |:-------------| -----| --- |
| **Version 2 models** | | | |
| cross-encoder/ms-marco-TinyBERT-L-2-v2 | 69.84 | 32.56 | 9000 |
| cross-encoder/ms-marco-MiniLM-L-2-v2 | 71.01 | 34.85 | 4100 |
| cross-encoder/ms-marco-MiniLM-L-4-v2 | 73.04 | 37.70 | 2500 |
| cross-encoder/ms-marco-MiniLM-L-6-v2 | 74.30 | 39.01 | 1800 |
| cross-encoder/ms-marco-MiniLM-L-12-v2 | 74.31 | 39.02 | 960 |
| **Version 1 models** | | | |
| cross-encoder/ms-marco-TinyBERT-L-2 | 67.43 | 30.15 | 9000 |
| cross-encoder/ms-marco-TinyBERT-L-4 | 68.09 | 34.50 | 2900 |
| cross-encoder/ms-marco-TinyBERT-L-6 | 69.57 | 36.13 | 680 |
| cross-encoder/ms-marco-electra-base | 71.99 | 36.41 | 340 |
| **Other models** | | | |
| nboost/pt-tinybert-msmarco | 63.63 | 28.80 | 2900 |
| nboost/pt-bert-base-uncased-msmarco | 70.94 | 34.75 | 340 |
| nboost/pt-bert-large-msmarco | 73.36 | 36.48 | 100 |
| Capreolus/electra-base-msmarco | 71.23 | 36.89 | 340 |
| amberoad/bert-multilingual-passage-reranking-msmarco | 68.40 | 35.54 | 330 |
| sebastian-hofstaetter/distilbert-cat-margin_mse-T2-msmarco | 72.82 | 37.88 | 720 |

Note: Runtime was computed on a V100 GPU.
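As a concrete illustration of the "score, then sort" step described above, here is a small sketch that uses this checkpoint with the `CrossEncoder` class; the query and candidate passages are made-up examples:

```python
from sentence_transformers import CrossEncoder

model = CrossEncoder('cross-encoder/ms-marco-TinyBERT-L-2-v2', max_length=512)

query = 'How many people live in Berlin?'   # illustrative query
passages = [                                # e.g. candidates returned by ElasticSearch
    'Berlin has a population of 3,520,031 registered inhabitants.',
    'New York City is famous for the Metropolitan Museum of Art.',
]

scores = model.predict([(query, passage) for passage in passages])

# sort the passages by relevance score, highest first
reranked = sorted(zip(scores, passages), key=lambda pair: pair[0], reverse=True)
for score, passage in reranked:
    print(f'{score:.3f}\t{passage}')
```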
microsoft/table-transformer-detection
microsoft
"2023-09-06T14:49:09Z"
476,037
204
transformers
[ "transformers", "pytorch", "safetensors", "table-transformer", "object-detection", "arxiv:2110.00061", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
object-detection
"2022-10-14T09:14:13Z"
--- license: mit widget: - src: https://www.invoicesimple.com/wp-content/uploads/2018/06/Sample-Invoice-printable.png example_title: Invoice --- # Table Transformer (fine-tuned for Table Detection) Table Transformer (DETR) model trained on PubTables1M. It was introduced in the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Smock et al. and first released in [this repository](https://github.com/microsoft/table-transformer). Disclaimer: The team releasing Table Transformer did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Table Transformer is equivalent to [DETR](https://huggingface.co/docs/transformers/model_doc/detr), a Transformer-based object detection model. Note that the authors decided to use the "normalize before" setting of DETR, which means that layernorm is applied before self- and cross-attention. ## Usage You can use the raw model for detecting tables in documents. See the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/table-transformer) for more info.
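Since the card describes usage only in prose, here is a minimal detection sketch. It assumes a recent `transformers` version where `AutoImageProcessor` resolves to the DETR image processor for this checkpoint; the file name `page.png` and the 0.9 confidence threshold are placeholders:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

image = Image.open("page.png").convert("RGB")  # placeholder document image

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# convert raw predictions into labeled boxes above a confidence threshold
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(v, 1) for v in box.tolist()])
```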
MoritzLaurer/mDeBERTa-v3-base-mnli-xnli
MoritzLaurer
"2024-01-08T12:37:16Z"
471,257
201
transformers
[ "transformers", "pytorch", "onnx", "safetensors", "deberta-v2", "text-classification", "zero-shot-classification", "nli", "multilingual", "en", "ar", "bg", "de", "el", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh", "dataset:multi_nli", "dataset:xnli", "arxiv:2111.09543", "arxiv:1809.05053", "arxiv:1911.02116", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
zero-shot-classification
"2022-03-02T23:29:04Z"
--- language: - multilingual - en - ar - bg - de - el - es - fr - hi - ru - sw - th - tr - ur - vi - zh license: mit tags: - zero-shot-classification - text-classification - nli - pytorch metrics: - accuracy datasets: - multi_nli - xnli pipeline_tag: zero-shot-classification widget: - text: "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU" candidate_labels: "politics, economy, entertainment, environment" --- # Multilingual mDeBERTa-v3-base-mnli-xnli ## Model description This multilingual model can perform natural language inference (NLI) on 100 languages and is therefore also suitable for multilingual zero-shot classification. The underlying model was pre-trained by Microsoft on the [CC100 multilingual dataset](https://huggingface.co/datasets/cc100). It was then fine-tuned on the [XNLI dataset](https://huggingface.co/datasets/xnli), which contains hypothesis-premise pairs from 15 languages, as well as the English [MNLI dataset](https://huggingface.co/datasets/multi_nli). As of December 2021, mDeBERTa-base is the best performing multilingual base-sized transformer model, introduced by Microsoft in [this paper](https://arxiv.org/pdf/2111.09543.pdf). If you are looking for a smaller, faster (but less performant) model, you can try [multilingual-MiniLMv2-L6-mnli-xnli](https://huggingface.co/MoritzLaurer/multilingual-MiniLMv2-L6-mnli-xnli). ### How to use the model #### Simple zero-shot classification pipeline ```python from transformers import pipeline classifier = pipeline("zero-shot-classification", model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli") sequence_to_classify = "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU" candidate_labels = ["politics", "economy", "entertainment", "environment"] output = classifier(sequence_to_classify, candidate_labels, multi_label=False) print(output) ``` #### NLI use-case ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model_name = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSequenceClassification.from_pretrained(model_name) premise = "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU" hypothesis = "Emmanuel Macron is the President of France" input = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt") output = model(input["input_ids"].to(device)) # device = "cuda:0" or "cpu" prediction = torch.softmax(output["logits"][0], -1).tolist() label_names = ["entailment", "neutral", "contradiction"] prediction = {name: round(float(pred) * 100, 1) for pred, name in zip(prediction, label_names)} print(prediction) ``` ### Training data This model was trained on the XNLI development dataset and the MNLI train dataset. The XNLI development set consists of 2490 professionally translated texts from English to 14 other languages (37350 texts in total) (see [this paper](https://arxiv.org/pdf/1809.05053.pdf)). Note that the XNLI contains a training set of 15 machine translated versions of the MNLI dataset for 15 languages, but due to quality issues with these machine translations, this model was only trained on the professional translations from the XNLI development set and the original English MNLI training set (392 702 texts). 
Not using machine-translated texts avoids overfitting the model to the 15 languages, avoids catastrophic forgetting of the other 85 languages mDeBERTa was pre-trained on, and significantly reduces training costs.

### Training procedure

mDeBERTa-v3-base-mnli-xnli was trained using the Hugging Face trainer with the following hyperparameters.

```
training_args = TrainingArguments(
    num_train_epochs=2,              # total number of training epochs
    learning_rate=2e-05,
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=16,   # batch size for evaluation
    warmup_ratio=0.1,                # ratio of warmup steps for the learning rate scheduler
    weight_decay=0.06,               # strength of weight decay
)
```

### Eval results

The model was evaluated on the XNLI test set in 15 languages (5010 texts per language, 75150 in total). Note that multilingual NLI models are capable of classifying NLI texts without receiving NLI training data in the specific language (cross-lingual transfer). This means that the model is also able to do NLI on the other 85 languages mDeBERTa was pre-trained on, but performance is most likely lower than for those languages available in XNLI. Also note that if other multilingual models on the model hub claim performance of around 90% on languages other than English, the authors have most likely made a mistake during testing, since none of the latest papers shows a multilingual average performance of more than a few points above 80% on XNLI (see [here](https://arxiv.org/pdf/2111.09543.pdf) or [here](https://arxiv.org/pdf/1911.02116.pdf)).

average | ar | bg | de | el | en | es | fr | hi | ru | sw | th | tr | ur | vi | zh
---------|----------|---------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------
0.808 | 0.802 | 0.829 | 0.825 | 0.826 | 0.883 | 0.845 | 0.834 | 0.771 | 0.813 | 0.748 | 0.793 | 0.807 | 0.740 | 0.795 | 0.8116

## Limitations and bias

Please consult the original DeBERTa-V3 paper and literature on different NLI datasets for potential biases.

## Citation

If you use this model, please cite: Laurer, Moritz, Wouter van Atteveldt, Andreu Salleras Casas, and Kasper Welbers. 2022. ‘Less Annotating, More Classifying – Addressing the Data Scarcity Issue of Supervised Machine Learning with Deep Transfer Learning and BERT - NLI’. Preprint, June. Open Science Framework. https://osf.io/74b8k.

## Ideas for cooperation or questions?

If you have questions or ideas for cooperation, contact me at m{dot}laurer{at}vu{dot}nl or [LinkedIn](https://www.linkedin.com/in/moritz-laurer/)

## Debugging and issues

Note that DeBERTa-v3 was released in late 2021 and older versions of HF Transformers seem to have issues running the model (e.g. resulting in an issue with the tokenizer). Using Transformers >= 4.13 might solve some issues. Note that mDeBERTa currently does not support FP16, see here: https://github.com/microsoft/DeBERTa/issues/77
xlnet/xlnet-base-cased
xlnet
"2023-01-24T14:50:31Z"
469,734
61
transformers
[ "transformers", "pytorch", "tf", "rust", "xlnet", "text-generation", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1906.08237", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-generation
"2022-03-02T23:29:04Z"
--- language: en license: mit datasets: - bookcorpus - wikipedia --- # XLNet (base-sized model) XLNet model pre-trained on English language. It was introduced in the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Yang et al. and first released in [this repository](https://github.com/zihangdai/xlnet/). Disclaimer: The team releasing XLNet did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description XLNet is a new unsupervised language representation learning method based on a novel generalized permutation language modeling objective. Additionally, XLNet employs Transformer-XL as the backbone model, exhibiting excellent performance for language tasks involving long context. Overall, XLNet achieves state-of-the-art (SOTA) results on various downstream language tasks including question answering, natural language inference, sentiment analysis, and document ranking. ## Intended uses & limitations The model is mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?search=xlnet) to look for fine-tuned versions on a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation, you should look at models like GPT2. ## Usage Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import XLNetTokenizer, XLNetModel tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') model = XLNetModel.from_pretrained('xlnet-base-cased') inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-1906-08237, author = {Zhilin Yang and Zihang Dai and Yiming Yang and Jaime G. Carbonell and Ruslan Salakhutdinov and Quoc V. Le}, title = {XLNet: Generalized Autoregressive Pretraining for Language Understanding}, journal = {CoRR}, volume = {abs/1906.08237}, year = {2019}, url = {http://arxiv.org/abs/1906.08237}, eprinttype = {arXiv}, eprint = {1906.08237}, timestamp = {Mon, 24 Jun 2019 17:28:45 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1906-08237.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
cointegrated/rubert-tiny-sentiment-balanced
cointegrated
"2023-03-20T09:53:10Z"
466,974
8
transformers
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "russian", "classification", "sentiment", "multiclass", "ru", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: ["ru"] tags: - russian - classification - sentiment - multiclass widget: - text: "Какая гадость эта ваша заливная рыба!" --- This is the [cointegrated/rubert-tiny](https://huggingface.co/cointegrated/rubert-tiny) model fine-tuned for classification of sentiment for short Russian texts. The problem is formulated as multiclass classification: `negative` vs `neutral` vs `positive`. ## Usage The function below estimates the sentiment of the given text: ```python # !pip install transformers sentencepiece --quiet import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification model_checkpoint = 'cointegrated/rubert-tiny-sentiment-balanced' tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint) if torch.cuda.is_available(): model.cuda() def get_sentiment(text, return_type='label'): """ Calculate sentiment of a text. `return_type` can be 'label', 'score' or 'proba' """ with torch.no_grad(): inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True).to(model.device) proba = torch.sigmoid(model(**inputs).logits).cpu().numpy()[0] if return_type == 'label': return model.config.id2label[proba.argmax()] elif return_type == 'score': return proba.dot([-1, 0, 1]) return proba text = 'Какая гадость эта ваша заливная рыба!' # classify the text print(get_sentiment(text, 'label')) # negative # score the text on the scale from -1 (very negative) to +1 (very positive) print(get_sentiment(text, 'score')) # -0.5894946306943893 # calculate probabilities of all labels print(get_sentiment(text, 'proba')) # [0.7870447 0.4947824 0.19755007] ``` ## Training We trained the model on [the datasets collected by Smetanin](https://github.com/sismetanin/sentiment-analysis-in-russian). We have converted all training data into a 3-class format and have up- and downsampled the training data to balance both the sources and the classes. The training code is available as [a Colab notebook](https://gist.github.com/avidale/e678c5478086c1d1adc52a85cb2b93e6). The metrics on the balanced test set are the following: | Source | Macro F1 | | ----------- | ----------- | | SentiRuEval2016_banks | 0.83 | | SentiRuEval2016_tele | 0.74 | | kaggle_news | 0.66 | | linis | 0.50 | | mokoron | 0.98 | | rureviews | 0.72 | | rusentiment | 0.67 |
vinai/bertweet-base
vinai
"2022-10-22T08:52:39Z"
457,991
24
transformers
[ "transformers", "pytorch", "tf", "jax", "roberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
# <a name="introduction"></a> BERTweet: A pre-trained language model for English Tweets BERTweet is the first public large-scale language model pre-trained for English Tweets. BERTweet is trained based on the [RoBERTa](https://github.com/pytorch/fairseq/blob/master/examples/roberta/README.md) pre-training procedure. The corpus used to pre-train BERTweet consists of 850M English Tweets (16B word tokens ~ 80GB), containing 845M Tweets streamed from 01/2012 to 08/2019 and 5M Tweets related to the **COVID-19** pandemic. The general architecture and experimental results of BERTweet can be found in our [paper](https://aclanthology.org/2020.emnlp-demos.2/): @inproceedings{bertweet, title = {{BERTweet: A pre-trained language model for English Tweets}}, author = {Dat Quoc Nguyen and Thanh Vu and Anh Tuan Nguyen}, booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations}, pages = {9--14}, year = {2020} } **Please CITE** our paper when BERTweet is used to help produce published results or is incorporated into other software. For further information or requests, please go to [BERTweet's homepage](https://github.com/VinAIResearch/BERTweet)! ### Main results <p float="left"> <img width="275" alt="postagging" src="https://user-images.githubusercontent.com/2412555/135724590-01d8d435-262d-44fe-a383-cd39324fe190.png" /> <img width="275" alt="ner" src="https://user-images.githubusercontent.com/2412555/135724598-1e3605e7-d8ce-4c5e-be4a-62ae8501fae7.png" /> </p> <p float="left"> <img width="275" alt="sentiment" src="https://user-images.githubusercontent.com/2412555/135724597-f1981f1e-fe73-4c03-b1ff-0cae0cc5f948.png" /> <img width="275" alt="irony" src="https://user-images.githubusercontent.com/2412555/135724595-15f4f2c8-bbb6-4ee6-82a0-034769dec183.png" /> </p>
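The card does not include a loading snippet; the sketch below mirrors the usage pattern from the BERTweet GitHub README. The example tweet is illustrative and is assumed to be already normalized (user mentions replaced with @USER, URLs with HTTPURL), as the tokenizer expects:

```python
import torch
from transformers import AutoModel, AutoTokenizer

bertweet = AutoModel.from_pretrained("vinai/bertweet-base")
tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)

# input Tweet is already normalized
line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"

input_ids = torch.tensor([tokenizer.encode(line)])
with torch.no_grad():
    features = bertweet(input_ids)  # contextual embeddings of the tweet tokens

print(features.last_hidden_state.shape)
```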
THUDM/chatglm3-6b
THUDM
"2024-04-04T11:19:59Z"
457,383
915
transformers
[ "transformers", "pytorch", "safetensors", "chatglm", "glm", "thudm", "custom_code", "zh", "en", "arxiv:2103.10360", "arxiv:2210.02414", "endpoints_compatible", "has_space", "region:us" ]
null
"2023-10-25T09:56:41Z"
--- language: - zh - en tags: - glm - chatglm - thudm --- # ChatGLM3-6B <p align="center"> 💻 <a href="https://github.com/THUDM/ChatGLM" target="_blank">Github Repo</a> • 🐦 <a href="https://twitter.com/thukeg" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/abs/2103.10360" target="_blank">[GLM@ACL 22]</a> <a href="https://github.com/THUDM/GLM" target="_blank">[GitHub]</a> • 📃 <a href="https://arxiv.org/abs/2210.02414" target="_blank">[GLM-130B@ICLR 23]</a> <a href="https://github.com/THUDM/GLM-130B" target="_blank">[GitHub]</a> <br> </p> <p align="center"> 👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-25ti5uohv-A_hs~am_D3Q8XPZMpj7wwQ" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM/blob/main/resources/WECHAT.md" target="_blank">WeChat</a> </p> <p align="center"> 📍Experience the larger-scale ChatGLM model at <a href="https://www.chatglm.cn">chatglm.cn</a> </p> ## 介绍 (Introduction) ChatGLM3-6B 是 ChatGLM 系列最新一代的开源模型,在保留了前两代模型对话流畅、部署门槛低等众多优秀特性的基础上,ChatGLM3-6B 引入了如下特性: 1. **更强大的基础模型:** ChatGLM3-6B 的基础模型 ChatGLM3-6B-Base 采用了更多样的训练数据、更充分的训练步数和更合理的训练策略。在语义、数学、推理、代码、知识等不同角度的数据集上测评显示,ChatGLM3-6B-Base 具有在 10B 以下的预训练模型中最强的性能。 2. **更完整的功能支持:** ChatGLM3-6B 采用了全新设计的 [Prompt 格式](https://github.com/THUDM/ChatGLM3/blob/main/PROMPT.md),除正常的多轮对话外。同时原生支持[工具调用](https://github.com/THUDM/ChatGLM3/blob/main/tool_using/README.md)(Function Call)、代码执行(Code Interpreter)和 Agent 任务等复杂场景。 3. **更全面的开源序列:** 除了对话模型 ChatGLM3-6B 外,还开源了基础模型 ChatGLM-6B-Base、长文本对话模型 ChatGLM3-6B-32K。以上所有权重对学术研究**完全开放**,在填写[问卷](https://open.bigmodel.cn/mla/form)进行登记后**亦允许免费商业使用**。 ChatGLM3-6B is the latest open-source model in the ChatGLM series. While retaining many excellent features such as smooth dialogue and low deployment threshold from the previous two generations, ChatGLM3-6B introduces the following features: 1. **More Powerful Base Model:** The base model of ChatGLM3-6B, ChatGLM3-6B-Base, employs a more diverse training dataset, more sufficient training steps, and a more reasonable training strategy. Evaluations on datasets such as semantics, mathematics, reasoning, code, knowledge, etc., show that ChatGLM3-6B-Base has the strongest performance among pre-trained models under 10B. 2. **More Comprehensive Function Support:** ChatGLM3-6B adopts a newly designed [Prompt format](https://github.com/THUDM/ChatGLM3/blob/main/PROMPT_en.md), in addition to the normal multi-turn dialogue. It also natively supports [function call](https://github.com/THUDM/ChatGLM3/blob/main/tool_using/README_en.md), code interpreter, and complex scenarios such as agent tasks. 3. **More Comprehensive Open-source Series:** In addition to the dialogue model ChatGLM3-6B, the base model ChatGLM-6B-Base and the long-text dialogue model ChatGLM3-6B-32K are also open-sourced. All the weights are **fully open** for academic research, and after completing the [questionnaire](https://open.bigmodel.cn/mla/form) registration, they are also **allowed for free commercial use**. 
## 软件依赖 (Dependencies) ```shell pip install protobuf transformers==4.30.2 cpm_kernels torch>=2.0 gradio mdtex2html sentencepiece accelerate ``` ## 代码调用 (Code Usage) 可以通过如下代码调用 ChatGLM3-6B 模型来生成对话: You can generate dialogue by invoking the ChatGLM3-6B model with the following code: ```ipython >>> from transformers import AutoTokenizer, AutoModel >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True) >>> model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True).half().cuda() >>> model = model.eval() >>> response, history = model.chat(tokenizer, "你好", history=[]) >>> print(response) 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。 >>> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history) >>> print(response) 晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法: 1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。 2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。 3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。 4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。 5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。 6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。 ``` 关于更多的使用说明,包括如何运行命令行和网页版本的 DEMO,以及使用模型量化以节省显存,请参考我们的 [Github Repo](https://github.com/THUDM/ChatGLM)。 For more instructions, including how to run CLI and web demos, and model quantization, please refer to our [Github Repo](https://github.com/THUDM/ChatGLM). ## 协议 (License) 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM3-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。 The code in this repository is open-sourced under the [Apache-2.0 license](LICENSE), while the use of the ChatGLM3-6B model weights needs to comply with the [Model License](MODEL_LICENSE). ## 引用 (Citation) 如果你觉得我们的工作有帮助的话,请考虑引用下列论文。 If you find our work helpful, please consider citing the following papers. ``` @article{zeng2022glm, title={Glm-130b: An open bilingual pre-trained model}, author={Zeng, Aohan and Liu, Xiao and Du, Zhengxiao and Wang, Zihan and Lai, Hanyu and Ding, Ming and Yang, Zhuoyi and Xu, Yifan and Zheng, Wendi and Xia, Xiao and others}, journal={arXiv preprint arXiv:2210.02414}, year={2022} } ``` ``` @inproceedings{du2022glm, title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling}, author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie}, booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, pages={320--335}, year={2022} } ```
prajjwal1/bert-tiny
prajjwal1
"2021-10-27T18:29:01Z"
457,380
85
transformers
[ "transformers", "pytorch", "BERT", "MNLI", "NLI", "transformer", "pre-training", "en", "arxiv:1908.08962", "arxiv:2110.01518", "license:mit", "endpoints_compatible", "region:us" ]
null
"2022-03-02T23:29:05Z"
--- language: - en license: - mit tags: - BERT - MNLI - NLI - transformer - pre-training --- The following model is a Pytorch pre-trained model obtained from converting Tensorflow checkpoint found in the [official Google BERT repository](https://github.com/google-research/bert). This is one of the smaller pre-trained BERT variants, together with [bert-mini](https://huggingface.co/prajjwal1/bert-mini) [bert-small](https://huggingface.co/prajjwal1/bert-small) and [bert-medium](https://huggingface.co/prajjwal1/bert-medium). They were introduced in the study `Well-Read Students Learn Better: On the Importance of Pre-training Compact Models` ([arxiv](https://arxiv.org/abs/1908.08962)), and ported to HF for the study `Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics` ([arXiv](https://arxiv.org/abs/2110.01518)). These models are supposed to be trained on a downstream task. If you use the model, please consider citing both the papers: ``` @misc{bhargava2021generalization, title={Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics}, author={Prajjwal Bhargava and Aleksandr Drozd and Anna Rogers}, year={2021}, eprint={2110.01518}, archivePrefix={arXiv}, primaryClass={cs.CL} } @article{DBLP:journals/corr/abs-1908-08962, author = {Iulia Turc and Ming{-}Wei Chang and Kenton Lee and Kristina Toutanova}, title = {Well-Read Students Learn Better: The Impact of Student Initialization on Knowledge Distillation}, journal = {CoRR}, volume = {abs/1908.08962}, year = {2019}, url = {http://arxiv.org/abs/1908.08962}, eprinttype = {arXiv}, eprint = {1908.08962}, timestamp = {Thu, 29 Aug 2019 16:32:34 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1908-08962.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` Config of this model: - `prajjwal1/bert-tiny` (L=2, H=128) [Model Link](https://huggingface.co/prajjwal1/bert-tiny) Other models to check out: - `prajjwal1/bert-mini` (L=4, H=256) [Model Link](https://huggingface.co/prajjwal1/bert-mini) - `prajjwal1/bert-small` (L=4, H=512) [Model Link](https://huggingface.co/prajjwal1/bert-small) - `prajjwal1/bert-medium` (L=8, H=512) [Model Link](https://huggingface.co/prajjwal1/bert-medium) Original Implementation and more info can be found in [this Github repository](https://github.com/prajjwal1/generalize_lm_nli). Twitter: [@prajjwal_1](https://twitter.com/prajjwal_1)
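Since these checkpoints are meant to be fine-tuned on a downstream task, here is a minimal sketch of attaching a classification head; the two-label setup and the example sentence are placeholders, not something prescribed by the card:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("prajjwal1/bert-tiny")
# num_labels=2 is a placeholder; the head is randomly initialized and must be fine-tuned
model = AutoModelForSequenceClassification.from_pretrained("prajjwal1/bert-tiny", num_labels=2)

inputs = tokenizer("A tiny BERT can still be fine-tuned on NLI.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)  # [1, 2] -- train with Trainer or a custom loop before using the scores
```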
flair/ner-english
flair
"2021-03-02T22:11:28Z"
455,265
28
flair
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:conll2003", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:05Z"
--- tags: - flair - token-classification - sequence-tagger-model language: en datasets: - conll2003 widget: - text: "George Washington went to Washington" --- ## English NER in Flair (default model) This is the standard 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **93,06** (corrected CoNLL-03) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english") # make example sentence sentence = Sentence("George Washington went to Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9968)] Span [5]: "Washington" [− Labels: LOC (0.9994)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_03 from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_03() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('glove'), # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
intfloat/multilingual-e5-small
intfloat
"2024-02-15T07:11:48Z"
451,341
86
sentence-transformers
[ "sentence-transformers", "pytorch", "onnx", "safetensors", "bert", "mteb", "Sentence Transformers", "sentence-similarity", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "om", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sd", "si", "sk", "sl", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "th", "tl", "tr", "ug", "uk", "ur", "uz", "vi", "xh", "yi", "zh", "arxiv:2402.05672", "arxiv:2108.08787", "arxiv:2104.08663", "arxiv:2210.07316", "license:mit", "model-index", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2023-06-30T07:31:03Z"
--- tags: - mteb - Sentence Transformers - sentence-similarity - sentence-transformers model-index: - name: multilingual-e5-small results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.79104477611939 - type: ap value: 36.9996434842022 - type: f1 value: 67.95453679103099 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (de) config: de split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 71.64882226980728 - type: ap value: 82.11942130026586 - type: f1 value: 69.87963421606715 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en-ext) config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.8095952023988 - type: ap value: 24.46869495579561 - type: f1 value: 63.00108480037597 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (ja) config: ja split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 64.186295503212 - type: ap value: 15.496804690197042 - type: f1 value: 52.07153895475031 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 88.699325 - type: ap value: 85.27039559917269 - type: f1 value: 88.65556295032513 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.69799999999999 - type: f1 value: 43.73187348654165 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (de) config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.245999999999995 - type: f1 value: 39.3863530637684 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (es) config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.394 - type: f1 value: 39.301223469483446 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (fr) config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 38.864 - type: f1 value: 37.97974261868003 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (ja) config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.682 - type: f1 value: 37.07399369768313 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (zh) config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.504 - type: f1 value: 36.62317273874278 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 19.061 - type: map_at_10 value: 31.703 - type: map_at_100 value: 32.967 - type: 
map_at_1000 value: 33.001000000000005 - type: map_at_3 value: 27.466 - type: map_at_5 value: 29.564 - type: mrr_at_1 value: 19.559 - type: mrr_at_10 value: 31.874999999999996 - type: mrr_at_100 value: 33.146 - type: mrr_at_1000 value: 33.18 - type: mrr_at_3 value: 27.667 - type: mrr_at_5 value: 29.74 - type: ndcg_at_1 value: 19.061 - type: ndcg_at_10 value: 39.062999999999995 - type: ndcg_at_100 value: 45.184000000000005 - type: ndcg_at_1000 value: 46.115 - type: ndcg_at_3 value: 30.203000000000003 - type: ndcg_at_5 value: 33.953 - type: precision_at_1 value: 19.061 - type: precision_at_10 value: 6.279999999999999 - type: precision_at_100 value: 0.9129999999999999 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 12.706999999999999 - type: precision_at_5 value: 9.431000000000001 - type: recall_at_1 value: 19.061 - type: recall_at_10 value: 62.802 - type: recall_at_100 value: 91.323 - type: recall_at_1000 value: 98.72 - type: recall_at_3 value: 38.122 - type: recall_at_5 value: 47.155 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 39.22266660528253 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 30.79980849482483 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 57.8790068352054 - type: mrr value: 71.78791276436706 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 82.36328364043163 - type: cos_sim_spearman value: 82.26211536195868 - type: euclidean_pearson value: 80.3183865039173 - type: euclidean_spearman value: 79.88495276296132 - type: manhattan_pearson value: 80.14484480692127 - type: manhattan_spearman value: 80.39279565980743 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (de-en) config: de-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 98.0375782881002 - type: f1 value: 97.86012526096033 - type: precision value: 97.77139874739039 - type: recall value: 98.0375782881002 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (fr-en) config: fr-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 93.35241030156286 - type: f1 value: 92.66050333846944 - type: precision value: 92.3306919069631 - type: recall value: 93.35241030156286 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (ru-en) config: ru-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 94.0699688257707 - type: f1 value: 93.50236693222492 - type: precision value: 93.22791825424315 - type: recall value: 94.0699688257707 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (zh-en) config: zh-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 89.25750394944708 - type: f1 value: 88.79234684921889 - type: precision value: 88.57293312269616 - type: recall value: 89.25750394944708 - task: 
type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 79.41558441558442 - type: f1 value: 79.25886487487219 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 35.747820820329736 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 27.045143830596146 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.252999999999997 - type: map_at_10 value: 31.655916666666666 - type: map_at_100 value: 32.680749999999996 - type: map_at_1000 value: 32.79483333333334 - type: map_at_3 value: 29.43691666666666 - type: map_at_5 value: 30.717416666666665 - type: mrr_at_1 value: 28.602750000000004 - type: mrr_at_10 value: 35.56875 - type: mrr_at_100 value: 36.3595 - type: mrr_at_1000 value: 36.427749999999996 - type: mrr_at_3 value: 33.586166666666664 - type: mrr_at_5 value: 34.73641666666666 - type: ndcg_at_1 value: 28.602750000000004 - type: ndcg_at_10 value: 36.06933333333334 - type: ndcg_at_100 value: 40.70141666666667 - type: ndcg_at_1000 value: 43.24341666666667 - type: ndcg_at_3 value: 32.307916666666664 - type: ndcg_at_5 value: 34.129999999999995 - type: precision_at_1 value: 28.602750000000004 - type: precision_at_10 value: 6.097666666666667 - type: precision_at_100 value: 0.9809166666666668 - type: precision_at_1000 value: 0.13766666666666663 - type: precision_at_3 value: 14.628166666666667 - type: precision_at_5 value: 10.266916666666667 - type: recall_at_1 value: 24.252999999999997 - type: recall_at_10 value: 45.31916666666667 - type: recall_at_100 value: 66.03575000000001 - type: recall_at_1000 value: 83.94708333333334 - type: recall_at_3 value: 34.71941666666666 - type: recall_at_5 value: 39.46358333333333 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 9.024000000000001 - type: map_at_10 value: 15.644 - type: map_at_100 value: 17.154 - type: map_at_1000 value: 17.345 - type: map_at_3 value: 13.028 - type: map_at_5 value: 14.251 - type: mrr_at_1 value: 19.674 - type: mrr_at_10 value: 29.826999999999998 - type: mrr_at_100 value: 30.935000000000002 - type: mrr_at_1000 value: 30.987 - type: mrr_at_3 value: 26.645000000000003 - type: mrr_at_5 value: 28.29 - type: ndcg_at_1 value: 19.674 - type: ndcg_at_10 value: 22.545 - type: ndcg_at_100 value: 29.207 - type: ndcg_at_1000 value: 32.912 - type: ndcg_at_3 value: 17.952 - type: ndcg_at_5 value: 19.363 - type: precision_at_1 value: 19.674 - type: precision_at_10 value: 7.212000000000001 - type: precision_at_100 value: 1.435 - type: precision_at_1000 value: 0.212 - type: precision_at_3 value: 13.507 - type: precision_at_5 value: 10.397 - type: recall_at_1 value: 9.024000000000001 - type: recall_at_10 value: 28.077999999999996 - type: recall_at_100 value: 51.403 - type: recall_at_1000 value: 72.406 - type: recall_at_3 value: 16.768 - type: recall_at_5 value: 20.737 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None 
metrics: - type: map_at_1 value: 8.012 - type: map_at_10 value: 17.138 - type: map_at_100 value: 24.146 - type: map_at_1000 value: 25.622 - type: map_at_3 value: 12.552 - type: map_at_5 value: 14.435 - type: mrr_at_1 value: 62.25000000000001 - type: mrr_at_10 value: 71.186 - type: mrr_at_100 value: 71.504 - type: mrr_at_1000 value: 71.514 - type: mrr_at_3 value: 69.333 - type: mrr_at_5 value: 70.408 - type: ndcg_at_1 value: 49.75 - type: ndcg_at_10 value: 37.76 - type: ndcg_at_100 value: 42.071 - type: ndcg_at_1000 value: 49.309 - type: ndcg_at_3 value: 41.644 - type: ndcg_at_5 value: 39.812999999999995 - type: precision_at_1 value: 62.25000000000001 - type: precision_at_10 value: 30.15 - type: precision_at_100 value: 9.753 - type: precision_at_1000 value: 1.9189999999999998 - type: precision_at_3 value: 45.667 - type: precision_at_5 value: 39.15 - type: recall_at_1 value: 8.012 - type: recall_at_10 value: 22.599 - type: recall_at_100 value: 48.068 - type: recall_at_1000 value: 71.328 - type: recall_at_3 value: 14.043 - type: recall_at_5 value: 17.124 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 42.455 - type: f1 value: 37.59462649781862 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 58.092 - type: map_at_10 value: 69.586 - type: map_at_100 value: 69.968 - type: map_at_1000 value: 69.982 - type: map_at_3 value: 67.48100000000001 - type: map_at_5 value: 68.915 - type: mrr_at_1 value: 62.166 - type: mrr_at_10 value: 73.588 - type: mrr_at_100 value: 73.86399999999999 - type: mrr_at_1000 value: 73.868 - type: mrr_at_3 value: 71.6 - type: mrr_at_5 value: 72.99 - type: ndcg_at_1 value: 62.166 - type: ndcg_at_10 value: 75.27199999999999 - type: ndcg_at_100 value: 76.816 - type: ndcg_at_1000 value: 77.09700000000001 - type: ndcg_at_3 value: 71.36 - type: ndcg_at_5 value: 73.785 - type: precision_at_1 value: 62.166 - type: precision_at_10 value: 9.716 - type: precision_at_100 value: 1.065 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 28.278 - type: precision_at_5 value: 18.343999999999998 - type: recall_at_1 value: 58.092 - type: recall_at_10 value: 88.73400000000001 - type: recall_at_100 value: 95.195 - type: recall_at_1000 value: 97.04599999999999 - type: recall_at_3 value: 78.45 - type: recall_at_5 value: 84.316 - task: type: Retrieval dataset: type: fiqa name: MTEB FiQA2018 config: default split: test revision: None metrics: - type: map_at_1 value: 16.649 - type: map_at_10 value: 26.457000000000004 - type: map_at_100 value: 28.169 - type: map_at_1000 value: 28.352 - type: map_at_3 value: 23.305 - type: map_at_5 value: 25.169000000000004 - type: mrr_at_1 value: 32.407000000000004 - type: mrr_at_10 value: 40.922 - type: mrr_at_100 value: 41.931000000000004 - type: mrr_at_1000 value: 41.983 - type: mrr_at_3 value: 38.786 - type: mrr_at_5 value: 40.205999999999996 - type: ndcg_at_1 value: 32.407000000000004 - type: ndcg_at_10 value: 33.314 - type: ndcg_at_100 value: 40.312 - type: ndcg_at_1000 value: 43.685 - type: ndcg_at_3 value: 30.391000000000002 - type: ndcg_at_5 value: 31.525 - type: precision_at_1 value: 32.407000000000004 - type: precision_at_10 value: 8.966000000000001 - type: precision_at_100 value: 1.6019999999999999 - type: precision_at_1000 value: 0.22200000000000003 - type: precision_at_3 value: 20.165 - type: 
precision_at_5 value: 14.722 - type: recall_at_1 value: 16.649 - type: recall_at_10 value: 39.117000000000004 - type: recall_at_100 value: 65.726 - type: recall_at_1000 value: 85.784 - type: recall_at_3 value: 27.914 - type: recall_at_5 value: 33.289 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 36.253 - type: map_at_10 value: 56.16799999999999 - type: map_at_100 value: 57.06099999999999 - type: map_at_1000 value: 57.126 - type: map_at_3 value: 52.644999999999996 - type: map_at_5 value: 54.909 - type: mrr_at_1 value: 72.505 - type: mrr_at_10 value: 79.66 - type: mrr_at_100 value: 79.869 - type: mrr_at_1000 value: 79.88 - type: mrr_at_3 value: 78.411 - type: mrr_at_5 value: 79.19800000000001 - type: ndcg_at_1 value: 72.505 - type: ndcg_at_10 value: 65.094 - type: ndcg_at_100 value: 68.219 - type: ndcg_at_1000 value: 69.515 - type: ndcg_at_3 value: 59.99 - type: ndcg_at_5 value: 62.909000000000006 - type: precision_at_1 value: 72.505 - type: precision_at_10 value: 13.749 - type: precision_at_100 value: 1.619 - type: precision_at_1000 value: 0.179 - type: precision_at_3 value: 38.357 - type: precision_at_5 value: 25.313000000000002 - type: recall_at_1 value: 36.253 - type: recall_at_10 value: 68.744 - type: recall_at_100 value: 80.925 - type: recall_at_1000 value: 89.534 - type: recall_at_3 value: 57.535000000000004 - type: recall_at_5 value: 63.282000000000004 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 80.82239999999999 - type: ap value: 75.65895781725314 - type: f1 value: 80.75880969095746 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 21.624 - type: map_at_10 value: 34.075 - type: map_at_100 value: 35.229 - type: map_at_1000 value: 35.276999999999994 - type: map_at_3 value: 30.245 - type: map_at_5 value: 32.42 - type: mrr_at_1 value: 22.264 - type: mrr_at_10 value: 34.638000000000005 - type: mrr_at_100 value: 35.744 - type: mrr_at_1000 value: 35.787 - type: mrr_at_3 value: 30.891000000000002 - type: mrr_at_5 value: 33.042 - type: ndcg_at_1 value: 22.264 - type: ndcg_at_10 value: 40.991 - type: ndcg_at_100 value: 46.563 - type: ndcg_at_1000 value: 47.743 - type: ndcg_at_3 value: 33.198 - type: ndcg_at_5 value: 37.069 - type: precision_at_1 value: 22.264 - type: precision_at_10 value: 6.5089999999999995 - type: precision_at_100 value: 0.9299999999999999 - type: precision_at_1000 value: 0.10300000000000001 - type: precision_at_3 value: 14.216999999999999 - type: precision_at_5 value: 10.487 - type: recall_at_1 value: 21.624 - type: recall_at_10 value: 62.303 - type: recall_at_100 value: 88.124 - type: recall_at_1000 value: 97.08 - type: recall_at_3 value: 41.099999999999994 - type: recall_at_5 value: 50.381 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 91.06703146374831 - type: f1 value: 90.86867815863172 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (de) config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 87.46970977740209 - type: f1 value: 86.36832872036588 - task: type: Classification 
dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (es) config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.26951300867245 - type: f1 value: 88.93561193959502 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (fr) config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 84.22799874725963 - type: f1 value: 84.30490069236556 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (hi) config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 86.02007888131948 - type: f1 value: 85.39376041027991 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (th) config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 85.34900542495481 - type: f1 value: 85.39859673336713 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.078431372549 - type: f1 value: 53.45071102002276 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (de) config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 65.85798816568047 - type: f1 value: 46.53112748993529 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (es) config: es split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 67.96864576384256 - type: f1 value: 45.966703022829506 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (fr) config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 61.31537738803633 - type: f1 value: 45.52601712835461 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (hi) config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 66.29616349946218 - type: f1 value: 47.24166485726613 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (th) config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 67.51537070524412 - type: f1 value: 49.463476319014276 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (af) config: af split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.06792199058508 - type: f1 value: 54.094921857502285 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (am) config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.960322797579025 - type: f1 value: 48.547371223370945 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ar) config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.425016812373904 - type: f1 value: 50.47069202054312 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (az) 
config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.798251513113655 - type: f1 value: 57.05013069086648 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (bn) config: bn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.37794216543376 - type: f1 value: 56.3607992649805 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (cy) config: cy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 46.56018829858777 - type: f1 value: 43.87319715715134 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (da) config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.9724277067922 - type: f1 value: 59.36480066245562 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (de) config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.72696704774715 - type: f1 value: 59.143595966615855 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (el) config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.5971755211836 - type: f1 value: 59.169445724946726 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 70.29589778076665 - type: f1 value: 67.7577001808977 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (es) config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.31136516476126 - type: f1 value: 64.52032955983242 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fa) config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.54472091459314 - type: f1 value: 61.47903120066317 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fi) config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.45595158036314 - type: f1 value: 58.0891846024637 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fr) config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.47074646940149 - type: f1 value: 62.84830858877575 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (he) config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.046402151983855 - type: f1 value: 55.269074430533195 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hi) config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.06523201075991 - type: f1 value: 61.35339643021369 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB 
MassiveIntentClassification (hu) config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.954942837928726 - type: f1 value: 57.07035922704846 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hy) config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.404169468728995 - type: f1 value: 53.94259011839138 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (id) config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.16610625420309 - type: f1 value: 61.337103431499365 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (is) config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 52.262945527908535 - type: f1 value: 49.7610691598921 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (it) config: it split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.54472091459314 - type: f1 value: 63.469099018440154 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ja) config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.22797579018157 - type: f1 value: 64.89098471083001 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (jv) config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 50.847343644922674 - type: f1 value: 47.8536963168393 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ka) config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 48.45326160053799 - type: f1 value: 46.370078045805556 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (km) config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 42.83120376597175 - type: f1 value: 39.68948521599982 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (kn) config: kn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.5084061869536 - type: f1 value: 53.961876160401545 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ko) config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.7895090786819 - type: f1 value: 61.134223684676 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (lv) config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.98991257565569 - type: f1 value: 52.579862862826296 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ml) config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.90316072629456 - type: f1 value: 58.203024538290336 - task: type: Classification dataset: type: 
mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (mn) config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.09818426361802 - type: f1 value: 54.22718458445455 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ms) config: ms split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.991257565568255 - type: f1 value: 55.84892781767421 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (my) config: my split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.901143241425686 - type: f1 value: 52.25264332199797 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (nb) config: nb split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.96368527236047 - type: f1 value: 58.927243876153454 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (nl) config: nl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.64223268325489 - type: f1 value: 62.340453718379706 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (pl) config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.52589105581708 - type: f1 value: 61.661113187022174 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (pt) config: pt split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.84599865501009 - type: f1 value: 64.59342572873005 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ro) config: ro split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.81035642232684 - type: f1 value: 57.5169089806797 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ru) config: ru split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.75991930060525 - type: f1 value: 62.89531115787938 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sl) config: sl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.51647612642906 - type: f1 value: 54.33154780100043 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sq) config: sq split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.985877605917956 - type: f1 value: 54.46187524463802 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sv) config: sv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.03026227303296 - type: f1 value: 62.34377392877748 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sw) config: sw split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 53.567585743106925 - type: f1 value: 50.73770655983206 - 
task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ta) config: ta split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.2595830531271 - type: f1 value: 53.657327291708626 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (te) config: te split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.82784129119032 - type: f1 value: 54.82518072665301 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (th) config: th split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.06859448554137 - type: f1 value: 63.00185280500495 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (tl) config: tl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.91055817081371 - type: f1 value: 55.54116301224262 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (tr) config: tr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.54404841963686 - type: f1 value: 59.57650946030184 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ur) config: ur split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.27706792199059 - type: f1 value: 56.50010066083435 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (vi) config: vi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.0719569603228 - type: f1 value: 61.817075925647956 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (zh-CN) config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.23806321452591 - type: f1 value: 65.24917026029749 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (zh-TW) config: zh-TW split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.53530598520511 - type: f1 value: 61.71131132295768 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (af) config: af split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.04303967720243 - type: f1 value: 60.3950085685985 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (am) config: am split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.83591123066578 - type: f1 value: 54.95059828830849 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ar) config: ar split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.62340282447881 - type: f1 value: 59.525159996498225 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (az) config: az split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy 
value: 60.85406859448555 - type: f1 value: 59.129299095681276 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (bn) config: bn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.76731674512441 - type: f1 value: 61.159560612627715 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (cy) config: cy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 50.181573638197705 - type: f1 value: 46.98422176289957 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (da) config: da split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.92737054472092 - type: f1 value: 67.69135611952979 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (de) config: de split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.18964357767318 - type: f1 value: 68.46106138186214 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (el) config: el split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.0712844653665 - type: f1 value: 66.75545422473901 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.4754539340955 - type: f1 value: 74.38427146553252 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (es) config: es split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.82515131136518 - type: f1 value: 69.63516462173847 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fa) config: fa split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.70880968392737 - type: f1 value: 67.45420662567926 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fi) config: fi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.95494283792871 - type: f1 value: 65.06191009049222 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fr) config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.75924680564896 - type: f1 value: 68.30833379585945 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (he) config: he split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.806321452589096 - type: f1 value: 63.273048243765054 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (hi) config: hi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.68997982515133 - type: f1 value: 66.54703855381324 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (hu) config: hu 
split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.46940147948891 - type: f1 value: 65.91017343463396 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (hy) config: hy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.49899125756556 - type: f1 value: 57.90333469917769 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (id) config: id split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.9219905850706 - type: f1 value: 67.23169403762938 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (is) config: is split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.486213853396094 - type: f1 value: 54.85282355583758 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (it) config: it split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.04169468728985 - type: f1 value: 68.83833333320462 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ja) config: ja split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.88702084734365 - type: f1 value: 74.04474735232299 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (jv) config: jv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.63416274377943 - type: f1 value: 55.11332211687954 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ka) config: ka split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 52.23604572965702 - type: f1 value: 50.86529813991055 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (km) config: km split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 46.62407531943511 - type: f1 value: 43.63485467164535 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (kn) config: kn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.15601882985878 - type: f1 value: 57.522837510959924 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ko) config: ko split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.84532616005382 - type: f1 value: 69.60021127179697 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (lv) config: lv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.65770006724949 - type: f1 value: 55.84219135523227 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ml) config: ml split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.53665097511768 - type: f1 value: 65.09087787792639 - task: type: Classification dataset: type: 
mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (mn) config: mn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.31405514458642 - type: f1 value: 58.06135303831491 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ms) config: ms split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.88231338264964 - type: f1 value: 62.751099407787926 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (my) config: my split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 58.86012104909213 - type: f1 value: 56.29118323058282 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (nb) config: nb split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.37390719569602 - type: f1 value: 66.27922244885102 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (nl) config: nl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.8675184936113 - type: f1 value: 70.22146529932019 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (pl) config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.2212508406187 - type: f1 value: 67.77454802056282 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (pt) config: pt split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.18090114324143 - type: f1 value: 68.03737625431621 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ro) config: ro split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.65030262273034 - type: f1 value: 63.792945486912856 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ru) config: ru split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.48217888365838 - type: f1 value: 69.96028997292197 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sl) config: sl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.17821116341627 - type: f1 value: 59.3935969827171 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sq) config: sq split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.86146603900471 - type: f1 value: 60.133692735032376 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sv) config: sv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.89441829186282 - type: f1 value: 70.03064076194089 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sw) config: sw split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 
58.15063887020847 - type: f1 value: 56.23326278499678 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ta) config: ta split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.43846671149966 - type: f1 value: 57.70440450281974 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (te) config: te split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.8507061197041 - type: f1 value: 59.22916396061171 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (th) config: th split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.65568258238063 - type: f1 value: 69.90736239440633 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (tl) config: tl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.8843308675185 - type: f1 value: 59.30332663713599 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (tr) config: tr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.05312710154674 - type: f1 value: 67.44024062594775 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ur) config: ur split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.111634162743776 - type: f1 value: 60.89083013084519 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (vi) config: vi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.44115669132482 - type: f1 value: 67.92227541674552 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (zh-CN) config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.4687289845326 - type: f1 value: 74.16376793486025 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (zh-TW) config: zh-TW split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.31876260928043 - type: f1 value: 68.5246745215607 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 30.90431696479766 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 27.259158476693774 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.28445330838555 - type: mrr value: 31.15758529581164 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.353 - type: map_at_10 value: 11.565 - type: map_at_100 value: 14.097000000000001 - type: map_at_1000 value: 
15.354999999999999 - type: map_at_3 value: 8.749 - type: map_at_5 value: 9.974 - type: mrr_at_1 value: 42.105 - type: mrr_at_10 value: 50.589 - type: mrr_at_100 value: 51.187000000000005 - type: mrr_at_1000 value: 51.233 - type: mrr_at_3 value: 48.246 - type: mrr_at_5 value: 49.546 - type: ndcg_at_1 value: 40.402 - type: ndcg_at_10 value: 31.009999999999998 - type: ndcg_at_100 value: 28.026 - type: ndcg_at_1000 value: 36.905 - type: ndcg_at_3 value: 35.983 - type: ndcg_at_5 value: 33.764 - type: precision_at_1 value: 42.105 - type: precision_at_10 value: 22.786 - type: precision_at_100 value: 6.916 - type: precision_at_1000 value: 1.981 - type: precision_at_3 value: 33.333 - type: precision_at_5 value: 28.731 - type: recall_at_1 value: 5.353 - type: recall_at_10 value: 15.039 - type: recall_at_100 value: 27.348 - type: recall_at_1000 value: 59.453 - type: recall_at_3 value: 9.792 - type: recall_at_5 value: 11.882 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: None metrics: - type: map_at_1 value: 33.852 - type: map_at_10 value: 48.924 - type: map_at_100 value: 49.854 - type: map_at_1000 value: 49.886 - type: map_at_3 value: 44.9 - type: map_at_5 value: 47.387 - type: mrr_at_1 value: 38.035999999999994 - type: mrr_at_10 value: 51.644 - type: mrr_at_100 value: 52.339 - type: mrr_at_1000 value: 52.35999999999999 - type: mrr_at_3 value: 48.421 - type: mrr_at_5 value: 50.468999999999994 - type: ndcg_at_1 value: 38.007000000000005 - type: ndcg_at_10 value: 56.293000000000006 - type: ndcg_at_100 value: 60.167 - type: ndcg_at_1000 value: 60.916000000000004 - type: ndcg_at_3 value: 48.903999999999996 - type: ndcg_at_5 value: 52.978 - type: precision_at_1 value: 38.007000000000005 - type: precision_at_10 value: 9.041 - type: precision_at_100 value: 1.1199999999999999 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_3 value: 22.084 - type: precision_at_5 value: 15.608 - type: recall_at_1 value: 33.852 - type: recall_at_10 value: 75.893 - type: recall_at_100 value: 92.589 - type: recall_at_1000 value: 98.153 - type: recall_at_3 value: 56.969 - type: recall_at_5 value: 66.283 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 69.174 - type: map_at_10 value: 82.891 - type: map_at_100 value: 83.545 - type: map_at_1000 value: 83.56700000000001 - type: map_at_3 value: 79.944 - type: map_at_5 value: 81.812 - type: mrr_at_1 value: 79.67999999999999 - type: mrr_at_10 value: 86.279 - type: mrr_at_100 value: 86.39 - type: mrr_at_1000 value: 86.392 - type: mrr_at_3 value: 85.21 - type: mrr_at_5 value: 85.92999999999999 - type: ndcg_at_1 value: 79.69000000000001 - type: ndcg_at_10 value: 86.929 - type: ndcg_at_100 value: 88.266 - type: ndcg_at_1000 value: 88.428 - type: ndcg_at_3 value: 83.899 - type: ndcg_at_5 value: 85.56700000000001 - type: precision_at_1 value: 79.69000000000001 - type: precision_at_10 value: 13.161000000000001 - type: precision_at_100 value: 1.513 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 36.603 - type: precision_at_5 value: 24.138 - type: recall_at_1 value: 69.174 - type: recall_at_10 value: 94.529 - type: recall_at_100 value: 99.15 - type: recall_at_1000 value: 99.925 - type: recall_at_3 value: 85.86200000000001 - type: recall_at_5 value: 90.501 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 
24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 39.13064340585255 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 58.97884249325877 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: None metrics: - type: map_at_1 value: 3.4680000000000004 - type: map_at_10 value: 7.865 - type: map_at_100 value: 9.332 - type: map_at_1000 value: 9.587 - type: map_at_3 value: 5.800000000000001 - type: map_at_5 value: 6.8790000000000004 - type: mrr_at_1 value: 17.0 - type: mrr_at_10 value: 25.629 - type: mrr_at_100 value: 26.806 - type: mrr_at_1000 value: 26.889000000000003 - type: mrr_at_3 value: 22.8 - type: mrr_at_5 value: 24.26 - type: ndcg_at_1 value: 17.0 - type: ndcg_at_10 value: 13.895 - type: ndcg_at_100 value: 20.491999999999997 - type: ndcg_at_1000 value: 25.759999999999998 - type: ndcg_at_3 value: 13.347999999999999 - type: ndcg_at_5 value: 11.61 - type: precision_at_1 value: 17.0 - type: precision_at_10 value: 7.090000000000001 - type: precision_at_100 value: 1.669 - type: precision_at_1000 value: 0.294 - type: precision_at_3 value: 12.3 - type: precision_at_5 value: 10.02 - type: recall_at_1 value: 3.4680000000000004 - type: recall_at_10 value: 14.363000000000001 - type: recall_at_100 value: 33.875 - type: recall_at_1000 value: 59.711999999999996 - type: recall_at_3 value: 7.483 - type: recall_at_5 value: 10.173 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 83.04084311714061 - type: cos_sim_spearman value: 77.51342467443078 - type: euclidean_pearson value: 80.0321166028479 - type: euclidean_spearman value: 77.29249114733226 - type: manhattan_pearson value: 80.03105964262431 - type: manhattan_spearman value: 77.22373689514794 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.1680158034387 - type: cos_sim_spearman value: 76.55983344071117 - type: euclidean_pearson value: 79.75266678300143 - type: euclidean_spearman value: 75.34516823467025 - type: manhattan_pearson value: 79.75959151517357 - type: manhattan_spearman value: 75.42330344141912 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 76.48898993209346 - type: cos_sim_spearman value: 76.96954120323366 - type: euclidean_pearson value: 76.94139109279668 - type: euclidean_spearman value: 76.85860283201711 - type: manhattan_pearson value: 76.6944095091912 - type: manhattan_spearman value: 76.61096912972553 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 77.85082366246944 - type: cos_sim_spearman value: 75.52053350101731 - type: euclidean_pearson value: 77.1165845070926 - type: euclidean_spearman value: 75.31216065884388 - type: manhattan_pearson value: 77.06193941833494 - type: manhattan_spearman value: 75.31003701700112 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - 
type: cos_sim_pearson value: 86.36305246526497 - type: cos_sim_spearman value: 87.11704613927415 - type: euclidean_pearson value: 86.04199125810939 - type: euclidean_spearman value: 86.51117572414263 - type: manhattan_pearson value: 86.0805106816633 - type: manhattan_spearman value: 86.52798366512229 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 82.18536255599724 - type: cos_sim_spearman value: 83.63377151025418 - type: euclidean_pearson value: 83.24657467993141 - type: euclidean_spearman value: 84.02751481993825 - type: manhattan_pearson value: 83.11941806582371 - type: manhattan_spearman value: 83.84251281019304 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (ko-ko) config: ko-ko split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 78.95816528475514 - type: cos_sim_spearman value: 78.86607380120462 - type: euclidean_pearson value: 78.51268699230545 - type: euclidean_spearman value: 79.11649316502229 - type: manhattan_pearson value: 78.32367302808157 - type: manhattan_spearman value: 78.90277699624637 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (ar-ar) config: ar-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 72.89126914997624 - type: cos_sim_spearman value: 73.0296921832678 - type: euclidean_pearson value: 71.50385903677738 - type: euclidean_spearman value: 73.13368899716289 - type: manhattan_pearson value: 71.47421463379519 - type: manhattan_spearman value: 73.03383242946575 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-ar) config: en-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 59.22923684492637 - type: cos_sim_spearman value: 57.41013211368396 - type: euclidean_pearson value: 61.21107388080905 - type: euclidean_spearman value: 60.07620768697254 - type: manhattan_pearson value: 59.60157142786555 - type: manhattan_spearman value: 59.14069604103739 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-de) config: en-de split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 76.24345978774299 - type: cos_sim_spearman value: 77.24225743830719 - type: euclidean_pearson value: 76.66226095469165 - type: euclidean_spearman value: 77.60708820493146 - type: manhattan_pearson value: 76.05303324760429 - type: manhattan_spearman value: 76.96353149912348 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 85.50879160160852 - type: cos_sim_spearman value: 86.43594662965224 - type: euclidean_pearson value: 86.06846012826577 - type: euclidean_spearman value: 86.02041395794136 - type: manhattan_pearson value: 86.10916255616904 - type: manhattan_spearman value: 86.07346068198953 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-tr) config: en-tr split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 58.39803698977196 - type: cos_sim_spearman value: 55.96910950423142 - type: euclidean_pearson value: 58.17941175613059 - type: euclidean_spearman value: 55.03019330522745 - type: manhattan_pearson value: 
57.333358138183286 - type: manhattan_spearman value: 54.04614023149965 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (es-en) config: es-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 70.98304089637197 - type: cos_sim_spearman value: 72.44071656215888 - type: euclidean_pearson value: 72.19224359033983 - type: euclidean_spearman value: 73.89871188913025 - type: manhattan_pearson value: 71.21098311547406 - type: manhattan_spearman value: 72.93405764824821 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (es-es) config: es-es split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 85.99792397466308 - type: cos_sim_spearman value: 84.83824377879495 - type: euclidean_pearson value: 85.70043288694438 - type: euclidean_spearman value: 84.70627558703686 - type: manhattan_pearson value: 85.89570850150801 - type: manhattan_spearman value: 84.95806105313007 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (fr-en) config: fr-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 72.21850322994712 - type: cos_sim_spearman value: 72.28669398117248 - type: euclidean_pearson value: 73.40082510412948 - type: euclidean_spearman value: 73.0326539281865 - type: manhattan_pearson value: 71.8659633964841 - type: manhattan_spearman value: 71.57817425823303 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (it-en) config: it-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 75.80921368595645 - type: cos_sim_spearman value: 77.33209091229315 - type: euclidean_pearson value: 76.53159540154829 - type: euclidean_spearman value: 78.17960842810093 - type: manhattan_pearson value: 76.13530186637601 - type: manhattan_spearman value: 78.00701437666875 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (nl-en) config: nl-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 74.74980608267349 - type: cos_sim_spearman value: 75.37597374318821 - type: euclidean_pearson value: 74.90506081911661 - type: euclidean_spearman value: 75.30151613124521 - type: manhattan_pearson value: 74.62642745918002 - type: manhattan_spearman value: 75.18619716592303 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 59.632662289205584 - type: cos_sim_spearman value: 60.938543391610914 - type: euclidean_pearson value: 62.113200529767056 - type: euclidean_spearman value: 61.410312633261164 - type: manhattan_pearson value: 61.75494698945686 - type: manhattan_spearman value: 60.92726195322362 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de) config: de split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 45.283470551557244 - type: cos_sim_spearman value: 53.44833015864201 - type: euclidean_pearson value: 41.17892011120893 - type: euclidean_spearman value: 53.81441383126767 - type: manhattan_pearson value: 41.17482200420659 - type: manhattan_spearman value: 53.82180269276363 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (es) config: es split: test revision: 
6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 60.5069165306236 - type: cos_sim_spearman value: 66.87803259033826 - type: euclidean_pearson value: 63.5428979418236 - type: euclidean_spearman value: 66.9293576586897 - type: manhattan_pearson value: 63.59789526178922 - type: manhattan_spearman value: 66.86555009875066 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (pl) config: pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 28.23026196280264 - type: cos_sim_spearman value: 35.79397812652861 - type: euclidean_pearson value: 17.828102102767353 - type: euclidean_spearman value: 35.721501145568894 - type: manhattan_pearson value: 17.77134274219677 - type: manhattan_spearman value: 35.98107902846267 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (tr) config: tr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 56.51946541393812 - type: cos_sim_spearman value: 63.714686006214485 - type: euclidean_pearson value: 58.32104651305898 - type: euclidean_spearman value: 62.237110895702216 - type: manhattan_pearson value: 58.579416468759185 - type: manhattan_spearman value: 62.459738981727 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (ar) config: ar split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 48.76009839569795 - type: cos_sim_spearman value: 56.65188431953149 - type: euclidean_pearson value: 50.997682160915595 - type: euclidean_spearman value: 55.99910008818135 - type: manhattan_pearson value: 50.76220659606342 - type: manhattan_spearman value: 55.517347595391456 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (ru) config: ru split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 51.232731157702425 - type: cos_sim_spearman value: 59.89531877658345 - type: euclidean_pearson value: 49.937914570348376 - type: euclidean_spearman value: 60.220905659334036 - type: manhattan_pearson value: 50.00987996844193 - type: manhattan_spearman value: 60.081341480977926 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh) config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 54.717524559088005 - type: cos_sim_spearman value: 66.83570886252286 - type: euclidean_pearson value: 58.41338625505467 - type: euclidean_spearman value: 66.68991427704938 - type: manhattan_pearson value: 58.78638572916807 - type: manhattan_spearman value: 66.58684161046335 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (fr) config: fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 73.2962042954962 - type: cos_sim_spearman value: 76.58255504852025 - type: euclidean_pearson value: 75.70983192778257 - type: euclidean_spearman value: 77.4547684870542 - type: manhattan_pearson value: 75.75565853870485 - type: manhattan_spearman value: 76.90208974949428 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de-en) config: de-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 54.47396266924846 - type: cos_sim_spearman value: 56.492267162048606 - type: euclidean_pearson value: 55.998505203070195 - type: euclidean_spearman value: 
56.46447012960222 - type: manhattan_pearson value: 54.873172394430995 - type: manhattan_spearman value: 56.58111534551218 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (es-en) config: es-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 69.87177267688686 - type: cos_sim_spearman value: 74.57160943395763 - type: euclidean_pearson value: 70.88330406826788 - type: euclidean_spearman value: 74.29767636038422 - type: manhattan_pearson value: 71.38245248369536 - type: manhattan_spearman value: 74.53102232732175 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (it) config: it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 72.80225656959544 - type: cos_sim_spearman value: 76.52646173725735 - type: euclidean_pearson value: 73.95710720200799 - type: euclidean_spearman value: 76.54040031984111 - type: manhattan_pearson value: 73.89679971946774 - type: manhattan_spearman value: 76.60886958161574 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (pl-en) config: pl-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 70.70844249898789 - type: cos_sim_spearman value: 72.68571783670241 - type: euclidean_pearson value: 72.38800772441031 - type: euclidean_spearman value: 72.86804422703312 - type: manhattan_pearson value: 71.29840508203515 - type: manhattan_spearman value: 71.86264441749513 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh-en) config: zh-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.647478923935694 - type: cos_sim_spearman value: 63.74453623540931 - type: euclidean_pearson value: 59.60138032437505 - type: euclidean_spearman value: 63.947930832166065 - type: manhattan_pearson value: 58.59735509491861 - type: manhattan_spearman value: 62.082503844627404 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (es-it) config: es-it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 65.8722516867162 - type: cos_sim_spearman value: 71.81208592523012 - type: euclidean_pearson value: 67.95315252165956 - type: euclidean_spearman value: 73.00749822046009 - type: manhattan_pearson value: 68.07884688638924 - type: manhattan_spearman value: 72.34210325803069 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de-fr) config: de-fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 54.5405814240949 - type: cos_sim_spearman value: 60.56838649023775 - type: euclidean_pearson value: 53.011731611314104 - type: euclidean_spearman value: 58.533194841668426 - type: manhattan_pearson value: 53.623067729338494 - type: manhattan_spearman value: 58.018756154446926 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de-pl) config: de-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 13.611046866216112 - type: cos_sim_spearman value: 28.238192909158492 - type: euclidean_pearson value: 22.16189199885129 - type: euclidean_spearman value: 35.012895679076564 - type: manhattan_pearson value: 21.969771178698387 - type: manhattan_spearman value: 32.456985088607475 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (fr-pl) 
config: fr-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 74.58077407011655 - type: cos_sim_spearman value: 84.51542547285167 - type: euclidean_pearson value: 74.64613843596234 - type: euclidean_spearman value: 84.51542547285167 - type: manhattan_pearson value: 75.15335973101396 - type: manhattan_spearman value: 84.51542547285167 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 82.0739825531578 - type: cos_sim_spearman value: 84.01057479311115 - type: euclidean_pearson value: 83.85453227433344 - type: euclidean_spearman value: 84.01630226898655 - type: manhattan_pearson value: 83.75323603028978 - type: manhattan_spearman value: 83.89677983727685 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 78.12945623123957 - type: mrr value: 93.87738713719106 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: None metrics: - type: map_at_1 value: 52.983000000000004 - type: map_at_10 value: 62.946000000000005 - type: map_at_100 value: 63.514 - type: map_at_1000 value: 63.554 - type: map_at_3 value: 60.183 - type: map_at_5 value: 61.672000000000004 - type: mrr_at_1 value: 55.667 - type: mrr_at_10 value: 64.522 - type: mrr_at_100 value: 64.957 - type: mrr_at_1000 value: 64.995 - type: mrr_at_3 value: 62.388999999999996 - type: mrr_at_5 value: 63.639 - type: ndcg_at_1 value: 55.667 - type: ndcg_at_10 value: 67.704 - type: ndcg_at_100 value: 70.299 - type: ndcg_at_1000 value: 71.241 - type: ndcg_at_3 value: 62.866 - type: ndcg_at_5 value: 65.16999999999999 - type: precision_at_1 value: 55.667 - type: precision_at_10 value: 9.033 - type: precision_at_100 value: 1.053 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 24.444 - type: precision_at_5 value: 16.133 - type: recall_at_1 value: 52.983000000000004 - type: recall_at_10 value: 80.656 - type: recall_at_100 value: 92.5 - type: recall_at_1000 value: 99.667 - type: recall_at_3 value: 67.744 - type: recall_at_5 value: 73.433 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.72772277227723 - type: cos_sim_ap value: 92.17845897992215 - type: cos_sim_f1 value: 85.9746835443038 - type: cos_sim_precision value: 87.07692307692308 - type: cos_sim_recall value: 84.89999999999999 - type: dot_accuracy value: 99.3039603960396 - type: dot_ap value: 60.70244020124878 - type: dot_f1 value: 59.92742353551063 - type: dot_precision value: 62.21743810548978 - type: dot_recall value: 57.8 - type: euclidean_accuracy value: 99.71683168316832 - type: euclidean_ap value: 91.53997039964659 - type: euclidean_f1 value: 84.88372093023257 - type: euclidean_precision value: 90.02242152466367 - type: euclidean_recall value: 80.30000000000001 - type: manhattan_accuracy value: 99.72376237623763 - type: manhattan_ap value: 91.80756777790289 - type: manhattan_f1 value: 85.48468106479157 - type: manhattan_precision value: 85.8728557013118 - type: manhattan_recall value: 85.1 - type: max_accuracy value: 99.72772277227723 - type: max_ap value: 92.17845897992215 - 
type: max_f1 value: 85.9746835443038 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 53.52464042600003 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 32.071631948736 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 49.19552407604654 - type: mrr value: 49.95269130379425 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 29.345293033095427 - type: cos_sim_spearman value: 29.976931423258403 - type: dot_pearson value: 27.047078008958408 - type: dot_spearman value: 27.75894368380218 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: None metrics: - type: map_at_1 value: 0.22 - type: map_at_10 value: 1.706 - type: map_at_100 value: 9.634 - type: map_at_1000 value: 23.665 - type: map_at_3 value: 0.5950000000000001 - type: map_at_5 value: 0.95 - type: mrr_at_1 value: 86.0 - type: mrr_at_10 value: 91.8 - type: mrr_at_100 value: 91.8 - type: mrr_at_1000 value: 91.8 - type: mrr_at_3 value: 91.0 - type: mrr_at_5 value: 91.8 - type: ndcg_at_1 value: 80.0 - type: ndcg_at_10 value: 72.573 - type: ndcg_at_100 value: 53.954 - type: ndcg_at_1000 value: 47.760999999999996 - type: ndcg_at_3 value: 76.173 - type: ndcg_at_5 value: 75.264 - type: precision_at_1 value: 86.0 - type: precision_at_10 value: 76.4 - type: precision_at_100 value: 55.50000000000001 - type: precision_at_1000 value: 21.802 - type: precision_at_3 value: 81.333 - type: precision_at_5 value: 80.4 - type: recall_at_1 value: 0.22 - type: recall_at_10 value: 1.925 - type: recall_at_100 value: 12.762 - type: recall_at_1000 value: 44.946000000000005 - type: recall_at_3 value: 0.634 - type: recall_at_5 value: 1.051 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (sqi-eng) config: sqi-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.0 - type: f1 value: 88.55666666666666 - type: precision value: 87.46166666666667 - type: recall value: 91.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (fry-eng) config: fry-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 57.22543352601156 - type: f1 value: 51.03220478943021 - type: precision value: 48.8150289017341 - type: recall value: 57.22543352601156 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kur-eng) config: kur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 46.58536585365854 - type: f1 value: 39.66870798578116 - type: precision value: 37.416085946573745 - type: recall value: 46.58536585365854 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tur-eng) config: tur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.7 - type: f1 
value: 86.77999999999999 - type: precision value: 85.45333333333332 - type: recall value: 89.7 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (deu-eng) config: deu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.39999999999999 - type: f1 value: 96.58333333333331 - type: precision value: 96.2 - type: recall value: 97.39999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (nld-eng) config: nld-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.4 - type: f1 value: 90.3 - type: precision value: 89.31666666666668 - type: recall value: 92.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ron-eng) config: ron-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.9 - type: f1 value: 83.67190476190476 - type: precision value: 82.23333333333332 - type: recall value: 86.9 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ang-eng) config: ang-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 50.0 - type: f1 value: 42.23229092632078 - type: precision value: 39.851634683724235 - type: recall value: 50.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ido-eng) config: ido-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 76.3 - type: f1 value: 70.86190476190477 - type: precision value: 68.68777777777777 - type: recall value: 76.3 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (jav-eng) config: jav-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 57.073170731707314 - type: f1 value: 50.658958927251604 - type: precision value: 48.26480836236933 - type: recall value: 57.073170731707314 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (isl-eng) config: isl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 68.2 - type: f1 value: 62.156507936507936 - type: precision value: 59.84964285714286 - type: recall value: 68.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (slv-eng) config: slv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.52126366950182 - type: f1 value: 72.8496210148701 - type: precision value: 70.92171498003819 - type: recall value: 77.52126366950182 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cym-eng) config: cym-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 70.78260869565217 - type: f1 value: 65.32422360248447 - type: precision value: 63.063067367415194 - type: recall value: 70.78260869565217 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kaz-eng) config: kaz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.43478260869566 - type: f1 value: 73.02608695652172 - type: precision value: 70.63768115942028 - type: recall value: 78.43478260869566 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (est-eng) config: est-eng split: test revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 60.9 - type: f1 value: 55.309753694581275 - type: precision value: 53.130476190476195 - type: recall value: 60.9 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (heb-eng) config: heb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 72.89999999999999 - type: f1 value: 67.92023809523809 - type: precision value: 65.82595238095237 - type: recall value: 72.89999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (gla-eng) config: gla-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 46.80337756332931 - type: f1 value: 39.42174900558496 - type: precision value: 36.97101116280851 - type: recall value: 46.80337756332931 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mar-eng) config: mar-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.8 - type: f1 value: 86.79 - type: precision value: 85.375 - type: recall value: 89.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (lat-eng) config: lat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 47.199999999999996 - type: f1 value: 39.95484348984349 - type: precision value: 37.561071428571424 - type: recall value: 47.199999999999996 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (bel-eng) config: bel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.8 - type: f1 value: 84.68190476190475 - type: precision value: 83.275 - type: recall value: 87.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (pms-eng) config: pms-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 48.76190476190476 - type: f1 value: 42.14965986394558 - type: precision value: 39.96743626743626 - type: recall value: 48.76190476190476 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (gle-eng) config: gle-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 66.10000000000001 - type: f1 value: 59.58580086580086 - type: precision value: 57.150238095238095 - type: recall value: 66.10000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (pes-eng) config: pes-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.3 - type: f1 value: 84.0 - type: precision value: 82.48666666666666 - type: recall value: 87.3 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (nob-eng) config: nob-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.4 - type: f1 value: 87.79523809523809 - type: precision value: 86.6 - type: recall value: 90.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (bul-eng) config: bul-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.0 - type: f1 value: 83.81 - type: precision value: 82.36666666666666 - type: recall value: 87.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba 
(cbk-eng) config: cbk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 63.9 - type: f1 value: 57.76533189033189 - type: precision value: 55.50595238095239 - type: recall value: 63.9 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hun-eng) config: hun-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 76.1 - type: f1 value: 71.83690476190478 - type: precision value: 70.04928571428573 - type: recall value: 76.1 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (uig-eng) config: uig-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 66.3 - type: f1 value: 59.32626984126984 - type: precision value: 56.62535714285713 - type: recall value: 66.3 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (rus-eng) config: rus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.60000000000001 - type: f1 value: 87.96333333333334 - type: precision value: 86.73333333333333 - type: recall value: 90.60000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (spa-eng) config: spa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.10000000000001 - type: f1 value: 91.10000000000001 - type: precision value: 90.16666666666666 - type: recall value: 93.10000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hye-eng) config: hye-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.71428571428571 - type: f1 value: 82.29142600436403 - type: precision value: 80.8076626877166 - type: recall value: 85.71428571428571 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tel-eng) config: tel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.88888888888889 - type: f1 value: 85.7834757834758 - type: precision value: 84.43732193732193 - type: recall value: 88.88888888888889 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (afr-eng) config: afr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.5 - type: f1 value: 85.67190476190476 - type: precision value: 84.43333333333332 - type: recall value: 88.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mon-eng) config: mon-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.72727272727273 - type: f1 value: 78.21969696969695 - type: precision value: 76.18181818181819 - type: recall value: 82.72727272727273 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (arz-eng) config: arz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 61.0062893081761 - type: f1 value: 55.13976240391334 - type: precision value: 52.92112499659669 - type: recall value: 61.0062893081761 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hrv-eng) config: hrv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.5 - type: f1 value: 86.86666666666666 - type: precision value: 
85.69166666666668 - type: recall value: 89.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (nov-eng) config: nov-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 73.54085603112841 - type: f1 value: 68.56031128404669 - type: precision value: 66.53047989623866 - type: recall value: 73.54085603112841 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (gsw-eng) config: gsw-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 43.58974358974359 - type: f1 value: 36.45299145299145 - type: precision value: 33.81155881155882 - type: recall value: 43.58974358974359 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (nds-eng) config: nds-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 59.599999999999994 - type: f1 value: 53.264689754689755 - type: precision value: 50.869166666666665 - type: recall value: 59.599999999999994 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ukr-eng) config: ukr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.2 - type: f1 value: 81.61666666666665 - type: precision value: 80.02833333333335 - type: recall value: 85.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (uzb-eng) config: uzb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 63.78504672897196 - type: f1 value: 58.00029669188548 - type: precision value: 55.815809968847354 - type: recall value: 63.78504672897196 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (lit-eng) config: lit-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 66.5 - type: f1 value: 61.518333333333345 - type: precision value: 59.622363699102834 - type: recall value: 66.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ina-eng) config: ina-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.6 - type: f1 value: 85.60222222222221 - type: precision value: 84.27916666666665 - type: recall value: 88.6 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (lfn-eng) config: lfn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 58.699999999999996 - type: f1 value: 52.732375957375965 - type: precision value: 50.63214035964035 - type: recall value: 58.699999999999996 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (zsm-eng) config: zsm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.10000000000001 - type: f1 value: 89.99666666666667 - type: precision value: 89.03333333333333 - type: recall value: 92.10000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ita-eng) config: ita-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.10000000000001 - type: f1 value: 87.55666666666667 - type: precision value: 86.36166666666668 - type: recall value: 90.10000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cmn-eng) config: 
cmn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.4 - type: f1 value: 88.89000000000001 - type: precision value: 87.71166666666666 - type: recall value: 91.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (lvs-eng) config: lvs-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.7 - type: f1 value: 60.67427750410509 - type: precision value: 58.71785714285714 - type: recall value: 65.7 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (glg-eng) config: glg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.39999999999999 - type: f1 value: 81.93190476190475 - type: precision value: 80.37833333333333 - type: recall value: 85.39999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ceb-eng) config: ceb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 47.833333333333336 - type: f1 value: 42.006625781625786 - type: precision value: 40.077380952380956 - type: recall value: 47.833333333333336 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (bre-eng) config: bre-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 10.4 - type: f1 value: 8.24465007215007 - type: precision value: 7.664597069597071 - type: recall value: 10.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ben-eng) config: ben-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.6 - type: f1 value: 77.76333333333334 - type: precision value: 75.57833333333332 - type: recall value: 82.6 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (swg-eng) config: swg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 52.67857142857143 - type: f1 value: 44.302721088435376 - type: precision value: 41.49801587301587 - type: recall value: 52.67857142857143 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (arq-eng) config: arq-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 28.3205268935236 - type: f1 value: 22.426666605171157 - type: precision value: 20.685900116470915 - type: recall value: 28.3205268935236 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kab-eng) config: kab-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 22.7 - type: f1 value: 17.833970473970474 - type: precision value: 16.407335164835164 - type: recall value: 22.7 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (fra-eng) config: fra-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.2 - type: f1 value: 89.92999999999999 - type: precision value: 88.87 - type: recall value: 92.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (por-eng) config: por-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.4 - type: f1 value: 89.25 - type: precision value: 88.21666666666667 - type: recall value: 91.4 - task: type: BitextMining dataset: type: 
mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tat-eng) config: tat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 69.19999999999999 - type: f1 value: 63.38269841269841 - type: precision value: 61.14773809523809 - type: recall value: 69.19999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (oci-eng) config: oci-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 48.8 - type: f1 value: 42.839915639915645 - type: precision value: 40.770287114845935 - type: recall value: 48.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (pol-eng) config: pol-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.8 - type: f1 value: 85.90666666666668 - type: precision value: 84.54166666666666 - type: recall value: 88.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (war-eng) config: war-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 46.6 - type: f1 value: 40.85892920804686 - type: precision value: 38.838223114604695 - type: recall value: 46.6 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (aze-eng) config: aze-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.0 - type: f1 value: 80.14190476190475 - type: precision value: 78.45333333333333 - type: recall value: 84.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (vie-eng) config: vie-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.5 - type: f1 value: 87.78333333333333 - type: precision value: 86.5 - type: recall value: 90.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (nno-eng) config: nno-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 74.5 - type: f1 value: 69.48397546897547 - type: precision value: 67.51869047619049 - type: recall value: 74.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cha-eng) config: cha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 32.846715328467155 - type: f1 value: 27.828177499710343 - type: precision value: 26.63451511991658 - type: recall value: 32.846715328467155 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mhr-eng) config: mhr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 8.0 - type: f1 value: 6.07664116764988 - type: precision value: 5.544177607179943 - type: recall value: 8.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (dan-eng) config: dan-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.6 - type: f1 value: 84.38555555555554 - type: precision value: 82.91583333333334 - type: recall value: 87.6 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ell-eng) config: ell-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.5 - type: f1 value: 84.08333333333331 - type: precision value: 82.47333333333333 - type: recall value: 87.5 - task: type: 
BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (amh-eng) config: amh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 80.95238095238095 - type: f1 value: 76.13095238095238 - type: precision value: 74.05753968253967 - type: recall value: 80.95238095238095 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (pam-eng) config: pam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 8.799999999999999 - type: f1 value: 6.971422975172975 - type: precision value: 6.557814916172301 - type: recall value: 8.799999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hsb-eng) config: hsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 44.099378881987576 - type: f1 value: 37.01649742022413 - type: precision value: 34.69420618488942 - type: recall value: 44.099378881987576 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (srp-eng) config: srp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.3 - type: f1 value: 80.32666666666667 - type: precision value: 78.60666666666665 - type: recall value: 84.3 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (epo-eng) config: epo-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.5 - type: f1 value: 90.49666666666666 - type: precision value: 89.56666666666668 - type: recall value: 92.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kzj-eng) config: kzj-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 10.0 - type: f1 value: 8.268423529875141 - type: precision value: 7.878118605532398 - type: recall value: 10.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (awa-eng) config: awa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.22077922077922 - type: f1 value: 74.27128427128426 - type: precision value: 72.28715728715729 - type: recall value: 79.22077922077922 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (fao-eng) config: fao-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.64885496183206 - type: f1 value: 58.87495456197747 - type: precision value: 55.992366412213734 - type: recall value: 65.64885496183206 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mal-eng) config: mal-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.06986899563319 - type: f1 value: 94.78408539543909 - type: precision value: 94.15332362930616 - type: recall value: 96.06986899563319 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ile-eng) config: ile-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.2 - type: f1 value: 71.72571428571428 - type: precision value: 69.41000000000001 - type: recall value: 77.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (bos-eng) config: bos-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: 
accuracy value: 86.4406779661017 - type: f1 value: 83.2391713747646 - type: precision value: 81.74199623352166 - type: recall value: 86.4406779661017 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cor-eng) config: cor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 8.4 - type: f1 value: 6.017828743398003 - type: precision value: 5.4829865484756795 - type: recall value: 8.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cat-eng) config: cat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 83.5 - type: f1 value: 79.74833333333333 - type: precision value: 78.04837662337664 - type: recall value: 83.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (eus-eng) config: eus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 60.4 - type: f1 value: 54.467301587301584 - type: precision value: 52.23242424242424 - type: recall value: 60.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (yue-eng) config: yue-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 74.9 - type: f1 value: 69.68699134199134 - type: precision value: 67.59873015873016 - type: recall value: 74.9 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (swe-eng) config: swe-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.0 - type: f1 value: 84.9652380952381 - type: precision value: 83.66166666666666 - type: recall value: 88.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (dtp-eng) config: dtp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 9.1 - type: f1 value: 7.681244588744588 - type: precision value: 7.370043290043291 - type: recall value: 9.1 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kat-eng) config: kat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 80.9651474530831 - type: f1 value: 76.84220605132133 - type: precision value: 75.19606398962966 - type: recall value: 80.9651474530831 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (jpn-eng) config: jpn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.9 - type: f1 value: 83.705 - type: precision value: 82.3120634920635 - type: recall value: 86.9 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (csb-eng) config: csb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 29.64426877470356 - type: f1 value: 23.98763072676116 - type: precision value: 22.506399397703746 - type: recall value: 29.64426877470356 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (xho-eng) config: xho-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 70.4225352112676 - type: f1 value: 62.84037558685445 - type: precision value: 59.56572769953053 - type: recall value: 70.4225352112676 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (orv-eng) config: orv-eng split: test 
revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 19.64071856287425 - type: f1 value: 15.125271011207756 - type: precision value: 13.865019261197494 - type: recall value: 19.64071856287425 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ind-eng) config: ind-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.2 - type: f1 value: 87.80666666666666 - type: precision value: 86.70833333333331 - type: recall value: 90.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tuk-eng) config: tuk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 23.15270935960591 - type: f1 value: 18.407224958949097 - type: precision value: 16.982385430661292 - type: recall value: 23.15270935960591 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (max-eng) config: max-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 55.98591549295775 - type: f1 value: 49.94718309859154 - type: precision value: 47.77864154624717 - type: recall value: 55.98591549295775 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (swh-eng) config: swh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 73.07692307692307 - type: f1 value: 66.74358974358974 - type: precision value: 64.06837606837607 - type: recall value: 73.07692307692307 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hin-eng) config: hin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.89999999999999 - type: f1 value: 93.25 - type: precision value: 92.43333333333332 - type: recall value: 94.89999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (dsb-eng) config: dsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 37.78705636743215 - type: f1 value: 31.63899658680452 - type: precision value: 29.72264397629742 - type: recall value: 37.78705636743215 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ber-eng) config: ber-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 21.6 - type: f1 value: 16.91697302697303 - type: precision value: 15.71225147075147 - type: recall value: 21.6 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tam-eng) config: tam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.01628664495115 - type: f1 value: 81.38514037536838 - type: precision value: 79.83170466883823 - type: recall value: 85.01628664495115 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (slk-eng) config: slk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 83.39999999999999 - type: f1 value: 79.96380952380952 - type: precision value: 78.48333333333333 - type: recall value: 83.39999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tgl-eng) config: tgl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 83.2 - type: f1 value: 79.26190476190476 - type: precision 
value: 77.58833333333334 - type: recall value: 83.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ast-eng) config: ast-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 75.59055118110236 - type: f1 value: 71.66854143232096 - type: precision value: 70.30183727034121 - type: recall value: 75.59055118110236 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mkd-eng) config: mkd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.5 - type: f1 value: 59.26095238095238 - type: precision value: 56.81909090909092 - type: recall value: 65.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (khm-eng) config: khm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 55.26315789473685 - type: f1 value: 47.986523325858506 - type: precision value: 45.33950006595436 - type: recall value: 55.26315789473685 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ces-eng) config: ces-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.89999999999999 - type: f1 value: 78.835 - type: precision value: 77.04761904761905 - type: recall value: 82.89999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tzl-eng) config: tzl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 43.269230769230774 - type: f1 value: 36.20421245421245 - type: precision value: 33.57371794871795 - type: recall value: 43.269230769230774 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (urd-eng) config: urd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.0 - type: f1 value: 84.70666666666666 - type: precision value: 83.23166666666665 - type: recall value: 88.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ara-eng) config: ara-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.4 - type: f1 value: 72.54666666666667 - type: precision value: 70.54318181818181 - type: recall value: 77.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kor-eng) config: kor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.60000000000001 - type: f1 value: 74.1588888888889 - type: precision value: 72.30250000000001 - type: recall value: 78.60000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (yid-eng) config: yid-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 72.40566037735849 - type: f1 value: 66.82587328813744 - type: precision value: 64.75039308176099 - type: recall value: 72.40566037735849 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (fin-eng) config: fin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 73.8 - type: f1 value: 68.56357142857144 - type: precision value: 66.3178822055138 - type: recall value: 73.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tha-eng) config: tha-eng split: test revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.78832116788321 - type: f1 value: 89.3552311435523 - type: precision value: 88.20559610705597 - type: recall value: 91.78832116788321 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (wuu-eng) config: wuu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 74.3 - type: f1 value: 69.05085581085581 - type: precision value: 66.955 - type: recall value: 74.3 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.896 - type: map_at_10 value: 8.993 - type: map_at_100 value: 14.133999999999999 - type: map_at_1000 value: 15.668000000000001 - type: map_at_3 value: 5.862 - type: map_at_5 value: 7.17 - type: mrr_at_1 value: 34.694 - type: mrr_at_10 value: 42.931000000000004 - type: mrr_at_100 value: 44.81 - type: mrr_at_1000 value: 44.81 - type: mrr_at_3 value: 38.435 - type: mrr_at_5 value: 41.701 - type: ndcg_at_1 value: 31.633 - type: ndcg_at_10 value: 21.163 - type: ndcg_at_100 value: 33.306000000000004 - type: ndcg_at_1000 value: 45.275999999999996 - type: ndcg_at_3 value: 25.685999999999996 - type: ndcg_at_5 value: 23.732 - type: precision_at_1 value: 34.694 - type: precision_at_10 value: 17.755000000000003 - type: precision_at_100 value: 6.938999999999999 - type: precision_at_1000 value: 1.48 - type: precision_at_3 value: 25.85 - type: precision_at_5 value: 23.265 - type: recall_at_1 value: 2.896 - type: recall_at_10 value: 13.333999999999998 - type: recall_at_100 value: 43.517 - type: recall_at_1000 value: 79.836 - type: recall_at_3 value: 6.306000000000001 - type: recall_at_5 value: 8.825 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 69.3874 - type: ap value: 13.829909072469423 - type: f1 value: 53.54534203543492 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 62.62026032823995 - type: f1 value: 62.85251350485221 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 33.21527881409797 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 84.97943613280086 - type: cos_sim_ap value: 70.75454316885921 - type: cos_sim_f1 value: 65.38274012676743 - type: cos_sim_precision value: 60.761214318078835 - type: cos_sim_recall value: 70.76517150395777 - type: dot_accuracy value: 79.0546581629612 - type: dot_ap value: 47.3197121792147 - type: dot_f1 value: 49.20106524633821 - type: dot_precision value: 42.45499808502489 - type: dot_recall value: 58.49604221635884 - type: euclidean_accuracy value: 85.08076533349228 - type: euclidean_ap value: 70.95016106374474 - type: euclidean_f1 value: 65.43987900176455 - type: euclidean_precision value: 62.64478764478765 - type: euclidean_recall value: 68.49604221635884 - type: 
manhattan_accuracy value: 84.93771234428085 - type: manhattan_ap value: 70.63668388755362 - type: manhattan_f1 value: 65.23895401262398 - type: manhattan_precision value: 56.946084218811485 - type: manhattan_recall value: 76.35883905013192 - type: max_accuracy value: 85.08076533349228 - type: max_ap value: 70.95016106374474 - type: max_f1 value: 65.43987900176455 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.69096130709822 - type: cos_sim_ap value: 84.82526278228542 - type: cos_sim_f1 value: 77.65485060585536 - type: cos_sim_precision value: 75.94582658619167 - type: cos_sim_recall value: 79.44256236526024 - type: dot_accuracy value: 80.97954748321496 - type: dot_ap value: 64.81642914145866 - type: dot_f1 value: 60.631996987229975 - type: dot_precision value: 54.5897293631712 - type: dot_recall value: 68.17831844779796 - type: euclidean_accuracy value: 88.6987231730508 - type: euclidean_ap value: 84.80003825477253 - type: euclidean_f1 value: 77.67194179854496 - type: euclidean_precision value: 75.7128235122094 - type: euclidean_recall value: 79.73514012935017 - type: manhattan_accuracy value: 88.62692591298949 - type: manhattan_ap value: 84.80451408255276 - type: manhattan_f1 value: 77.69888949572183 - type: manhattan_precision value: 73.70311528631622 - type: manhattan_recall value: 82.15275639051433 - type: max_accuracy value: 88.6987231730508 - type: max_ap value: 84.82526278228542 - type: max_f1 value: 77.69888949572183 language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn - mr - ms - my - ne - nl - 'no' - om - or - pa - pl - ps - pt - ro - ru - sa - sd - si - sk - sl - so - sq - sr - su - sv - sw - ta - te - th - tl - tr - ug - uk - ur - uz - vi - xh - yi - zh license: mit
---

## Multilingual-E5-small

[Multilingual E5 Text Embeddings: A Technical Report](https://arxiv.org/pdf/2402.05672).
Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, Furu Wei, arXiv 2024

This model has 12 layers and the embedding size is 384.

## Usage

Below is an example to encode queries and passages from the MS-MARCO passage ranking dataset.

```python
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel


def average_pool(last_hidden_states: Tensor,
                 attention_mask: Tensor) -> Tensor:
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]


# Each input text should start with "query: " or "passage: ", even for non-English texts.
# For tasks other than retrieval, you can simply use the "query: " prefix.
input_texts = ['query: how much protein should a female eat',
               'query: 南瓜的家常做法',
               "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
               "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅"]

tokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-small')
model = AutoModel.from_pretrained('intfloat/multilingual-e5-small')

# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')

outputs = model(**batch_dict)
embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])

# normalize embeddings
embeddings = F.normalize(embeddings, p=2, dim=1)
scores = (embeddings[:2] @ embeddings[2:].T) * 100
print(scores.tolist())
```
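The printed `scores` are query-passage cosine similarities scaled by 100; for retrieval what matters is their relative order per query. As a minimal, illustrative continuation (not part of the original example, and assuming the `scores` tensor from the block above), they can be turned into a per-query passage ranking:

```python
import torch

# Continuing from the block above: `scores` has shape (num_queries, num_passages) = (2, 2).
ranking = torch.argsort(scores, dim=1, descending=True)
for query_idx, order in enumerate(ranking.tolist()):
    # `order` lists passage indices from most to least similar for this query.
    print(f"query {query_idx}: passage ranking (best first): {order}")
```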
Check out the chart below to see how much protein you should be eating each day.", "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅"] tokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-small') model = AutoModel.from_pretrained('intfloat/multilingual-e5-small') # Tokenize the input texts batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt') outputs = model(**batch_dict) embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask']) # normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) scores = (embeddings[:2] @ embeddings[2:].T) * 100 print(scores.tolist()) ``` ## Supported Languages This model is initialized from [microsoft/Multilingual-MiniLM-L12-H384](https://huggingface.co/microsoft/Multilingual-MiniLM-L12-H384) and continually trained on a mixture of multilingual datasets. It supports 100 languages from xlm-roberta, but low-resource languages may see performance degradation. ## Training Details **Initialization**: [microsoft/Multilingual-MiniLM-L12-H384](https://huggingface.co/microsoft/Multilingual-MiniLM-L12-H384) **First stage**: contrastive pre-training with weak supervision | Dataset | Weak supervision | # of text pairs | |--------------------------------------------------------------------------------------------------------|---------------------------------------|-----------------| | Filtered [mC4](https://huggingface.co/datasets/mc4) | (title, page content) | 1B | | [CC News](https://huggingface.co/datasets/intfloat/multilingual_cc_news) | (title, news content) | 400M | | [NLLB](https://huggingface.co/datasets/allenai/nllb) | translation pairs | 2.4B | | [Wikipedia](https://huggingface.co/datasets/intfloat/wikipedia) | (hierarchical section title, passage) | 150M | | Filtered [Reddit](https://www.reddit.com/) | (comment, response) | 800M | | [S2ORC](https://github.com/allenai/s2orc) | (title, abstract) and citation pairs | 100M | | [Stackexchange](https://stackexchange.com/) | (question, answer) | 50M | | [xP3](https://huggingface.co/datasets/bigscience/xP3) | (input prompt, response) | 80M | | [Miscellaneous unsupervised SBERT data](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) | - | 10M | **Second stage**: supervised fine-tuning | Dataset | Language | # of text pairs | |----------------------------------------------------------------------------------------|--------------|-----------------| | [MS MARCO](https://microsoft.github.io/msmarco/) | English | 500k | | [NQ](https://github.com/facebookresearch/DPR) | English | 70k | | [Trivia QA](https://github.com/facebookresearch/DPR) | English | 60k | | [NLI from SimCSE](https://github.com/princeton-nlp/SimCSE) | English | <300k | | [ELI5](https://huggingface.co/datasets/eli5) | English | 500k | | [DuReader Retrieval](https://github.com/baidu/DuReader/tree/master/DuReader-Retrieval) | Chinese | 86k | | [KILT Fever](https://huggingface.co/datasets/kilt_tasks) | English | 70k | | [KILT HotpotQA](https://huggingface.co/datasets/kilt_tasks) | English | 70k | | [SQuAD](https://huggingface.co/datasets/squad) | English | 87k | | [Quora](https://huggingface.co/datasets/quora) | English | 150k | | [Mr. 
TyDi](https://huggingface.co/datasets/castorini/mr-tydi) | 11 languages | 50k | | [MIRACL](https://huggingface.co/datasets/miracl/miracl) | 16 languages | 40k | For all labeled datasets, we only use its training set for fine-tuning. For other training details, please refer to our paper at [https://arxiv.org/pdf/2402.05672](https://arxiv.org/pdf/2402.05672). ## Benchmark Results on [Mr. TyDi](https://arxiv.org/abs/2108.08787) | Model | Avg MRR@10 | | ar | bn | en | fi | id | ja | ko | ru | sw | te | th | |-----------------------|------------|-------|------| --- | --- | --- | --- | --- | --- | --- |------| --- | --- | | BM25 | 33.3 | | 36.7 | 41.3 | 15.1 | 28.8 | 38.2 | 21.7 | 28.1 | 32.9 | 39.6 | 42.4 | 41.7 | | mDPR | 16.7 | | 26.0 | 25.8 | 16.2 | 11.3 | 14.6 | 18.1 | 21.9 | 18.5 | 7.3 | 10.6 | 13.5 | | BM25 + mDPR | 41.7 | | 49.1 | 53.5 | 28.4 | 36.5 | 45.5 | 35.5 | 36.2 | 42.7 | 40.5 | 42.0 | 49.2 | | | | | multilingual-e5-small | 64.4 | | 71.5 | 66.3 | 54.5 | 57.7 | 63.2 | 55.4 | 54.3 | 60.8 | 65.4 | 89.1 | 70.1 | | multilingual-e5-base | 65.9 | | 72.3 | 65.0 | 58.5 | 60.8 | 64.9 | 56.6 | 55.8 | 62.7 | 69.0 | 86.6 | 72.7 | | multilingual-e5-large | **70.5** | | 77.5 | 73.2 | 60.8 | 66.8 | 68.5 | 62.5 | 61.6 | 65.8 | 72.7 | 90.2 | 76.2 | ## MTEB Benchmark Evaluation Check out [unilm/e5](https://github.com/microsoft/unilm/tree/master/e5) to reproduce evaluation results on the [BEIR](https://arxiv.org/abs/2104.08663) and [MTEB benchmark](https://arxiv.org/abs/2210.07316). ## Support for Sentence Transformers Below is an example for usage with sentence_transformers. ```python from sentence_transformers import SentenceTransformer model = SentenceTransformer('intfloat/multilingual-e5-small') input_texts = [ 'query: how much protein should a female eat', 'query: 南瓜的家常做法', "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 i s 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or traini ng for a marathon. Check out the chart below to see how much protein you should be eating each day.", "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮 ,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右, 放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油 锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅" ] embeddings = model.encode(input_texts, normalize_embeddings=True) ``` Package requirements `pip install sentence_transformers~=2.2.2` Contributors: [michaelfeil](https://huggingface.co/michaelfeil) ## FAQ **1. Do I need to add the prefix "query: " and "passage: " to input texts?** Yes, this is how the model is trained, otherwise you will see a performance degradation. Here are some rules of thumb: - Use "query: " and "passage: " correspondingly for asymmetric tasks such as passage retrieval in open QA, ad-hoc information retrieval. - Use "query: " prefix for symmetric tasks such as semantic similarity, bitext mining, paraphrase retrieval. - Use "query: " prefix if you want to use embeddings as features, such as linear probing classification, clustering. **2. Why are my reproduced results slightly different from reported in the model card?** Different versions of `transformers` and `pytorch` could cause negligible but non-zero performance differences. **3. Why does the cosine similarity scores distribute around 0.7 to 1.0?** This is a known and expected behavior as we use a low temperature 0.01 for InfoNCE contrastive loss. 
For text embedding tasks like text retrieval or semantic similarity, what matters is the relative order of the scores instead of the absolute values, so this should not be an issue.

## Citation

If you find our paper or models helpful, please consider citing them as follows:

```
@article{wang2024multilingual,
  title={Multilingual E5 Text Embeddings: A Technical Report},
  author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu},
  journal={arXiv preprint arXiv:2402.05672},
  year={2024}
}
```

## Limitations

Long texts will be truncated to at most 512 tokens.
ybelkada/tiny-random-T5ForConditionalGeneration-calibrated
ybelkada
"2023-04-05T17:16:54Z"
445,133
0
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
"2023-04-05T17:13:33Z"
A "better calibrated" tiny T5 model for testing purposes
diffusers/stable-diffusion-xl-1.0-inpainting-0.1
diffusers
"2023-09-03T16:36:39Z"
443,549
224
diffusers
[ "diffusers", "safetensors", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "inpainting", "arxiv:2112.10752", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "has_space", "diffusers:StableDiffusionXLInpaintPipeline", "region:us" ]
text-to-image
"2023-09-01T14:07:10Z"
---
license: openrail++
base_model: stabilityai/stable-diffusion-xl-base-1.0
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers
- inpainting
inference: false
---

# SD-XL Inpainting 0.1 Model Card

![inpaint-example](inpaint-examples-min.png)

SD-XL Inpainting 0.1 is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input, with the extra capability of inpainting the pictures by using a mask.

SD-XL Inpainting 0.1 was initialized with the `stable-diffusion-xl-base-1.0` weights. The model was trained for 40k steps at resolution 1024x1024, with 5% dropping of the text-conditioning to improve classifier-free guidance sampling. For inpainting, the UNet has 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself) whose weights were zero-initialized after restoring the non-inpainting checkpoint. During training, we generate synthetic masks and, in 25% of cases, mask everything.

## How to use

```py
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image
import torch

pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")

img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

image = load_image(img_url).resize((1024, 1024))
mask_image = load_image(mask_url).resize((1024, 1024))

prompt = "a tiger sitting on a park bench"
generator = torch.Generator(device="cuda").manual_seed(0)

image = pipe(
  prompt=prompt,
  image=image,
  mask_image=mask_image,
  guidance_scale=8.0,
  num_inference_steps=20,  # steps between 15 and 30 work well for us
  strength=0.99,  # make sure to use `strength` below 1.0
  generator=generator,
).images[0]
```

**How it works:**

`image` | `mask_image`
:-------------------------:|:-------------------------:|
<img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="300"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="300"/>

`prompt` | `Output`
:-------------------------:|:-------------------------:|
<span style="position: relative;bottom: 150px;">a tiger sitting on a park bench</span> | <img src="https://huggingface.co/datasets/valhalla/images/resolve/main/tiger.png" alt="drawing" width="300"/>

## Model Description

- **Developed by:** The Diffusers team
- **Model type:** Diffusion-based text-to-image generative model
- **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md)
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses two fixed, pretrained text encoders ([OpenCLIP-ViT/G](https://github.com/mlfoundations/open_clip) and [CLIP-ViT/L](https://github.com/openai/CLIP/tree/main)).

## Uses

### Direct Use

The model is intended for research purposes only. Possible research areas and tasks include

- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.
- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.

Excluded uses are described below.

### Out-of-Scope Use

The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.

## Limitations and Bias

### Limitations

- The model does not achieve perfect photorealism.
- The model cannot render legible text.
- The model struggles with more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”.
- Faces and people in general may not be generated properly.
- The autoencoding part of the model is lossy.
- When the strength parameter is set to 1 (i.e. starting in-painting from a fully masked image), the quality of the image is degraded. The model retains the non-masked contents of the image, but images look less sharp. We're investigating this and working on the next version.

### Bias

While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
tiiuae/falcon-rw-1b
tiiuae
"2023-07-12T21:34:11Z"
437,948
96
transformers
[ "transformers", "pytorch", "falcon", "text-generation", "custom_code", "en", "dataset:tiiuae/falcon-refinedweb", "arxiv:2306.01116", "arxiv:2005.14165", "arxiv:2108.12409", "arxiv:2205.14135", "license:apache-2.0", "autotrain_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-04-26T09:25:36Z"
--- datasets: - tiiuae/falcon-refinedweb language: - en inference: false license: apache-2.0 --- # Falcon-RW-1B **Falcon-RW-1B is a 1B parameters causal decoder-only model built by [TII](https://www.tii.ae) and trained on 350B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb). It is made available under the Apache 2.0 license.** See the 📓 [paper on arXiv](https://arxiv.org/abs/2306.01116) for more details. RefinedWeb is a high-quality web dataset built by leveraging stringent filtering and large-scale deduplication. Falcon-RW-1B, trained on RefinedWeb only, matches or outperforms comparable models trained on curated data. ⚠️ Falcon is now available as a core model in the `transformers` library! To use the in-library version, please install the latest version of `transformers` with `pip install git+https://github.com/huggingface/transformers.git`, then simply remove the `trust_remote_code=True` argument from `from_pretrained()`. ⚠️ This model is intended for use as a **research artifact**, to study the influence of training on web data alone. **If you are interested in state-of-the-art models, we recommend using Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b), both trained on >1,000 billion tokens.** ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-rw-1b" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` 💥 **Falcon LLMs require PyTorch 2.0 for use with `transformers`!** # Model Card for Falcon-RW-1B ## Model Details ### Model Description - **Developed by:** [https://www.tii.ae](https://www.tii.ae); - **Model type:** Causal decoder-only; - **Language(s) (NLP):** English; - **License:** Apache 2.0. ### Model Source - **Paper:** [https://arxiv.org/abs/2306.01116](https://arxiv.org/abs/2306.01116). ## Uses ### Direct Use Research on large language models, specifically the influence of adequately filtered and deduplicated web data on the properties of large language models (fairness, safety, limitations, capabilities, etc.). ### Out-of-Scope Use Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful. Broadly speaking, we would recommend Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) for any use not directly related to research on web data pipelines. ## Bias, Risks, and Limitations Falcon-RW-1B is trained on English data only, and will not generalize appropriately to other languages. Furthermore, as it is trained on a large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online. ### Recommendations We recommend users of Falcon-RW-1B to consider finetuning it for the specific set of tasks of interest, and for guardrails and appropriate precautions to be taken for any production use. 
## How to Get Started with the Model

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch

model = "tiiuae/falcon-rw-1b"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
sequences = pipeline(
    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
    max_length=200,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```

## Training Details

### Training Data

Falcon-RW-1B was trained on 350B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), a high-quality filtered and deduplicated web dataset. The data was tokenized with the GPT-2 tokenizer.

### Training Procedure

Falcon-RW-1B was trained on 32 A100 40GB GPUs, using only data parallelism with ZeRO.

#### Training Hyperparameters

Hyperparameters were adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)).

| **Hyperparameter** | **Value**  | **Comment**                               |
|--------------------|------------|-------------------------------------------|
| Precision          | `bfloat16` |                                           |
| Optimizer          | AdamW      |                                           |
| Learning rate      | 2e-4       | 500M tokens warm-up, cosine decay to 2e-5 |
| Weight decay       | 1e-1       |                                           |
| Batch size         | 512        | 4B tokens ramp-up                         |

#### Speeds, Sizes, Times

Training happened in early December 2022 and took about six days.

## Evaluation

See the 📓 [paper on arXiv](https://arxiv.org/abs/2306.01116) for in-depth evaluation.

## Technical Specifications

### Model Architecture and Objective

Falcon-RW-1B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token). The architecture is adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), but uses ALiBi ([Press et al., 2021](https://arxiv.org/abs/2108.12409)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)).

| **Hyperparameter** | **Value** | **Comment**                            |
|--------------------|-----------|----------------------------------------|
| Layers             | 24        |                                        |
| `d_model`          | 2048      |                                        |
| `head_dim`         | 64        | Reduced to optimise for FlashAttention |
| Vocabulary         | 50304     |                                        |
| Sequence length    | 2048      |                                        |

### Compute Infrastructure

#### Hardware

Falcon-RW-1B was trained on AWS SageMaker, on 32 A100 40GB GPUs in P4d instances.

#### Software

Falcon-RW-1B was trained using a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.).

## Citation

```
@article{refinedweb,
  title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only},
  author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay},
  journal={arXiv preprint arXiv:2306.01116},
  eprint={2306.01116},
  eprinttype = {arXiv},
  url={https://arxiv.org/abs/2306.01116},
  year={2023}
}
```

## Contact

falconllm@tii.ae
sshleifer/distilbart-cnn-12-6
sshleifer
"2021-06-14T07:51:12Z"
437,189
226
transformers
[ "transformers", "pytorch", "jax", "rust", "bart", "text2text-generation", "summarization", "en", "dataset:cnn_dailymail", "dataset:xsum", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
summarization
"2022-03-02T23:29:05Z"
--- language: en tags: - summarization license: apache-2.0 datasets: - cnn_dailymail - xsum thumbnail: https://huggingface.co/front/thumbnails/distilbart_medium.png --- ### Usage This checkpoint should be loaded into `BartForConditionalGeneration.from_pretrained`. See the [BART docs](https://huggingface.co/transformers/model_doc/bart.html?#transformers.BartForConditionalGeneration) for more information. ### Metrics for DistilBART models | Model Name | MM Params | Inference Time (MS) | Speedup | Rouge 2 | Rouge-L | |:---------------------------|------------:|----------------------:|----------:|----------:|----------:| | distilbart-xsum-12-1 | 222 | 90 | 2.54 | 18.31 | 33.37 | | distilbart-xsum-6-6 | 230 | 132 | 1.73 | 20.92 | 35.73 | | distilbart-xsum-12-3 | 255 | 106 | 2.16 | 21.37 | 36.39 | | distilbart-xsum-9-6 | 268 | 136 | 1.68 | 21.72 | 36.61 | | bart-large-xsum (baseline) | 406 | 229 | 1 | 21.85 | 36.50 | | distilbart-xsum-12-6 | 306 | 137 | 1.68 | 22.12 | 36.99 | | bart-large-cnn (baseline) | 406 | 381 | 1 | 21.06 | 30.63 | | distilbart-12-3-cnn | 255 | 214 | 1.78 | 20.57 | 30.00 | | distilbart-12-6-cnn | 306 | 307 | 1.24 | 21.26 | 30.59 | | distilbart-6-6-cnn | 230 | 182 | 2.09 | 20.17 | 29.70 |
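### Example

To make the usage note above concrete, here is a minimal summarization sketch (not part of the original card); the example article text and the generation settings are placeholders:

```python
from transformers import BartForConditionalGeneration, BartTokenizer

# Load the checkpoint into BartForConditionalGeneration, as recommended above
tokenizer = BartTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
model = BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-cnn-12-6")

article = (
    "The Eiffel Tower is 324 metres tall, about the same height as an 81-storey building, "
    "and was the tallest man-made structure in the world for 41 years."
)

inputs = tokenizer(article, truncation=True, max_length=1024, return_tensors="pt")
summary_ids = model.generate(inputs["input_ids"], num_beams=4, min_length=10, max_length=60)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```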
microsoft/wavlm-base
microsoft
"2021-12-22T17:23:36Z"
436,939
4
transformers
[ "transformers", "pytorch", "wavlm", "feature-extraction", "speech", "en", "arxiv:2110.13900", "has_space", "region:us" ]
feature-extraction
"2022-03-02T23:29:05Z"
---
language:
- en
datasets:
tags:
- speech
inference: false
---

# WavLM-Base

[Microsoft's WavLM](https://github.com/microsoft/unilm/tree/master/wavlm)

The base model pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz.

**Note**: This model does not have a tokenizer as it was pretrained on audio alone. In order to use this model for **speech recognition**, a tokenizer should be created and the model should be fine-tuned on labeled text data. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for a more detailed explanation of how to fine-tune the model.

The model was pre-trained on 960h of [Librispeech](https://huggingface.co/datasets/librispeech_asr).

[Paper: WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900)

Authors: Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei

**Abstract**
*Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisely and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.*

The original model can be found under https://github.com/microsoft/unilm/tree/master/wavlm.

# Usage

This is an English pre-trained speech model that has to be fine-tuned on a downstream task like speech recognition or audio classification before it can be used in inference. The model was pre-trained in English and should therefore perform well only in English. The model has been shown to work well on the [SUPERB benchmark](https://superbbenchmark.org/).

**Note**: The model was pre-trained on phonemes rather than characters. This means that one should make sure that the input text is converted to a sequence of phonemes before fine-tuning.

## Speech Recognition

To fine-tune the model for speech recognition, see [the official speech recognition example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/speech-recognition).

## Speech Classification

To fine-tune the model for speech classification, see [the official audio classification example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/audio-classification).

## Speaker Verification

TODO

## Speaker Diarization

TODO

# Contribution

The model was contributed by [cywang](https://huggingface.co/cywang) and [patrickvonplaten](https://huggingface.co/patrickvonplaten).
# License The official license can be found [here](https://github.com/microsoft/UniSpeech/blob/main/LICENSE) ![design](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/wavlm.png)
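## Feature extraction sketch

For reference, extracting frame-level representations from the pretrained checkpoint might look roughly like the sketch below (not part of the original card). It assumes the repository ships a feature extractor config, and the dummy waveform is a placeholder for real 16 kHz audio:

```python
import torch
from transformers import AutoFeatureExtractor, WavLMModel

# Assumption: the repo provides a preprocessor config; otherwise construct a
# Wav2Vec2FeatureExtractor with sampling_rate=16000 manually.
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base")
model = WavLMModel.from_pretrained("microsoft/wavlm-base")

# One second of dummy 16 kHz audio; replace with a real 1-D waveform array.
waveform = torch.randn(16000).numpy()

inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (batch, frames, hidden_size)

print(hidden_states.shape)
```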
dbmdz/bert-base-turkish-cased
dbmdz
"2024-02-20T23:36:11Z"
435,845
65
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "tr", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
null
"2022-03-02T23:29:05Z"
---
language: tr
license: mit
---

# 🤗 + 📚 dbmdz Turkish BERT model

In this repository the MDZ Digital Library team (dbmdz) at the Bavarian State Library open sources a cased model for Turkish 🎉

# 🇹🇷 BERTurk

BERTurk is a community-driven cased BERT model for Turkish.

Some datasets used for pretraining and evaluation were contributed by the awesome Turkish NLP community, as was the decision for the model name: BERTurk.

## Stats

The current version of the model is trained on a filtered and sentence segmented version of the Turkish [OSCAR corpus](https://traces1.inria.fr/oscar/), a recent Wikipedia dump, various [OPUS corpora](http://opus.nlpl.eu/) and a special corpus provided by [Kemal Oflazer](http://www.andrew.cmu.edu/user/ko/).

The final training corpus has a size of 35GB and 4,404,976,662 tokens.

Thanks to Google's TensorFlow Research Cloud (TFRC) we could train a cased model on a TPU v3-8 for 2M steps.

## Model weights

Currently only PyTorch-[Transformers](https://github.com/huggingface/transformers) compatible weights are available. If you need access to TensorFlow checkpoints, please raise an issue!

| Model                           | Downloads
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------
| `dbmdz/bert-base-turkish-cased` | [`config.json`](https://cdn.huggingface.co/dbmdz/bert-base-turkish-cased/config.json) • [`pytorch_model.bin`](https://cdn.huggingface.co/dbmdz/bert-base-turkish-cased/pytorch_model.bin) • [`vocab.txt`](https://cdn.huggingface.co/dbmdz/bert-base-turkish-cased/vocab.txt)

## Usage

With Transformers >= 2.3 our BERTurk cased model can be loaded like:

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-turkish-cased")
model = AutoModel.from_pretrained("dbmdz/bert-base-turkish-cased")
```

## Results

For results on PoS tagging or NER tasks, please refer to [this repository](https://github.com/stefan-it/turkish-bert).

# Huggingface model hub

All models are available on the [Huggingface model hub](https://huggingface.co/dbmdz).

# Contact (Bugs, Feedback, Contribution and more)

For questions about our BERT models just open an issue [here](https://github.com/dbmdz/berts/issues/new) 🤗

# Acknowledgments

Thanks to [Kemal Oflazer](http://www.andrew.cmu.edu/user/ko/) for providing us additional large corpora for Turkish. Many thanks to Reyyan Yeniterzi for providing us the Turkish NER dataset for evaluation.

Research supported with Cloud TPUs from Google's TensorFlow Research Cloud (TFRC). Thanks for providing access to the TFRC ❤️

Thanks to the generous support from the [Hugging Face](https://huggingface.co/) team, it is possible to download both cased and uncased models from their S3 storage 🤗
facebook/mms-1b-all
facebook
"2023-06-15T10:45:44Z"
429,924
84
transformers
[ "transformers", "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "mms", "ab", "af", "ak", "am", "ar", "as", "av", "ay", "az", "ba", "bm", "be", "bn", "bi", "bo", "sh", "br", "bg", "ca", "cs", "ce", "cv", "ku", "cy", "da", "de", "dv", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fa", "fj", "fi", "fr", "fy", "ff", "ga", "gl", "gn", "gu", "zh", "ht", "ha", "he", "hi", "hu", "hy", "ig", "ia", "ms", "is", "it", "jv", "ja", "kn", "ka", "kk", "kr", "km", "ki", "rw", "ky", "ko", "kv", "lo", "la", "lv", "ln", "lt", "lb", "lg", "mh", "ml", "mr", "mk", "mg", "mt", "mn", "mi", "my", "nl", "no", "ne", "ny", "oc", "om", "or", "os", "pa", "pl", "pt", "ps", "qu", "ro", "rn", "ru", "sg", "sk", "sl", "sm", "sn", "sd", "so", "es", "sq", "su", "sv", "sw", "ta", "tt", "te", "tg", "tl", "th", "ti", "ts", "tr", "uk", "vi", "wo", "xh", "yo", "zu", "za", "dataset:google/fleurs", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
"2023-05-27T11:43:21Z"
--- tags: - mms language: - ab - af - ak - am - ar - as - av - ay - az - ba - bm - be - bn - bi - bo - sh - br - bg - ca - cs - ce - cv - ku - cy - da - de - dv - dz - el - en - eo - et - eu - ee - fo - fa - fj - fi - fr - fy - ff - ga - gl - gn - gu - zh - ht - ha - he - hi - sh - hu - hy - ig - ia - ms - is - it - jv - ja - kn - ka - kk - kr - km - ki - rw - ky - ko - kv - lo - la - lv - ln - lt - lb - lg - mh - ml - mr - ms - mk - mg - mt - mn - mi - my - zh - nl - 'no' - 'no' - ne - ny - oc - om - or - os - pa - pl - pt - ms - ps - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - ro - rn - ru - sg - sk - sl - sm - sn - sd - so - es - sq - su - sv - sw - ta - tt - te - tg - tl - th - ti - ts - tr - uk - ms - vi - wo - xh - ms - yo - ms - zu - za license: cc-by-nc-4.0 datasets: - google/fleurs metrics: - wer --- # Massively Multilingual Speech (MMS) - Finetuned ASR - ALL This checkpoint is a model fine-tuned for multi-lingual ASR and part of Facebook's [Massive Multilingual Speech project](https://research.facebook.com/publications/scaling-speech-technology-to-1000-languages/). This checkpoint is based on the [Wav2Vec2 architecture](https://huggingface.co/docs/transformers/model_doc/wav2vec2) and makes use of adapter models to transcribe 1000+ languages. The checkpoint consists of **1 billion parameters** and has been fine-tuned from [facebook/mms-1b](https://huggingface.co/facebook/mms-1b) on 1162 languages. ## Table Of Content - [Example](#example) - [Supported Languages](#supported-languages) - [Model details](#model-details) - [Additional links](#additional-links) ## Example This MMS checkpoint can be used with [Transformers](https://github.com/huggingface/transformers) to transcribe audio of 1107 different languages. Let's look at a simple example. First, we install transformers and some other libraries ``` pip install torch accelerate torchaudio datasets pip install --upgrade transformers ```` **Note**: In order to use MMS you need to have at least `transformers >= 4.30` installed. If the `4.30` version is not yet available [on PyPI](https://pypi.org/project/transformers/) make sure to install `transformers` from source: ``` pip install git+https://github.com/huggingface/transformers.git ``` Next, we load a couple of audio samples via `datasets`. Make sure that the audio data is sampled to 16000 kHz. 
```py
from datasets import load_dataset, Audio

# English
stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
en_sample = next(iter(stream_data))["audio"]["array"]

# French
stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "fr", split="test", streaming=True)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
fr_sample = next(iter(stream_data))["audio"]["array"]
```

Next, we load the model and processor

```py
from transformers import Wav2Vec2ForCTC, AutoProcessor
import torch

model_id = "facebook/mms-1b-all"

processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
```

Now we process the audio data, pass the processed audio data to the model and transcribe the model output, just like we usually do for Wav2Vec2 models such as [facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h)

```py
inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs).logits

ids = torch.argmax(outputs, dim=-1)[0]
transcription = processor.decode(ids)
# 'joe keton disapproved of films and buster also had reservations about the media'
```

We can now keep the same model in memory and simply switch out the language adapters by calling the convenient [`load_adapter()`]() function for the model and [`set_target_lang()`]() for the tokenizer. We pass the target language as an input - "fra" for French.

```py
processor.tokenizer.set_target_lang("fra")
model.load_adapter("fra")

inputs = processor(fr_sample, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs).logits

ids = torch.argmax(outputs, dim=-1)[0]
transcription = processor.decode(ids)
# "ce dernier est volé tout au long de l'histoire romaine"
```

In the same way the language can be switched out for all other supported languages. Please have a look at:

```py
processor.tokenizer.vocab.keys()
```

For more details, please have a look at [the official docs](https://huggingface.co/docs/transformers/main/en/model_doc/mms).

## Supported Languages

This model supports 1162 languages. Click the following to toggle all supported languages of this checkpoint in [ISO 639-3 code](https://en.wikipedia.org/wiki/ISO_639-3). You can find more details about the languages and their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html).
<details> <summary>Click to toggle</summary> - abi - abk - abp - aca - acd - ace - acf - ach - acn - acr - acu - ade - adh - adj - adx - aeu - afr - agd - agg - agn - agr - agu - agx - aha - ahk - aia - aka - akb - ake - akp - alj - alp - alt - alz - ame - amf - amh - ami - amk - ann - any - aoz - apb - apr - ara - arl - asa - asg - asm - ast - ata - atb - atg - ati - atq - ava - avn - avu - awa - awb - ayo - ayr - ayz - azb - azg - azj-script_cyrillic - azj-script_latin - azz - bak - bam - ban - bao - bas - bav - bba - bbb - bbc - bbo - bcc-script_arabic - bcc-script_latin - bcl - bcw - bdg - bdh - bdq - bdu - bdv - beh - bel - bem - ben - bep - bex - bfa - bfo - bfy - bfz - bgc - bgq - bgr - bgt - bgw - bha - bht - bhz - bib - bim - bis - biv - bjr - bjv - bjw - bjz - bkd - bkv - blh - blt - blx - blz - bmq - bmr - bmu - bmv - bng - bno - bnp - boa - bod - boj - bom - bor - bos - bov - box - bpr - bps - bqc - bqi - bqj - bqp - bre - bru - bsc - bsq - bss - btd - bts - btt - btx - bud - bul - bus - bvc - bvz - bwq - bwu - byr - bzh - bzi - bzj - caa - cab - cac-dialect_sanmateoixtatan - cac-dialect_sansebastiancoatan - cak-dialect_central - cak-dialect_santamariadejesus - cak-dialect_santodomingoxenacoj - cak-dialect_southcentral - cak-dialect_western - cak-dialect_yepocapa - cap - car - cas - cat - cax - cbc - cbi - cbr - cbs - cbt - cbu - cbv - cce - cco - cdj - ceb - ceg - cek - ces - cfm - cgc - che - chf - chv - chz - cjo - cjp - cjs - ckb - cko - ckt - cla - cle - cly - cme - cmn-script_simplified - cmo-script_khmer - cmo-script_latin - cmr - cnh - cni - cnl - cnt - coe - cof - cok - con - cot - cou - cpa - cpb - cpu - crh - crk-script_latin - crk-script_syllabics - crn - crq - crs - crt - csk - cso - ctd - ctg - cto - ctu - cuc - cui - cuk - cul - cwa - cwe - cwt - cya - cym - daa - dah - dan - dar - dbj - dbq - ddn - ded - des - deu - dga - dgi - dgk - dgo - dgr - dhi - did - dig - dik - dip - div - djk - dnj-dialect_blowowest - dnj-dialect_gweetaawueast - dnt - dnw - dop - dos - dsh - dso - dtp - dts - dug - dwr - dyi - dyo - dyu - dzo - eip - eka - ell - emp - enb - eng - enx - epo - ese - ess - est - eus - evn - ewe - eza - fal - fao - far - fas - fij - fin - flr - fmu - fon - fra - frd - fry - ful - gag-script_cyrillic - gag-script_latin - gai - gam - gau - gbi - gbk - gbm - gbo - gde - geb - gej - gil - gjn - gkn - gld - gle - glg - glk - gmv - gna - gnd - gng - gof-script_latin - gog - gor - gqr - grc - gri - grn - grt - gso - gub - guc - gud - guh - guj - guk - gum - guo - guq - guu - gux - gvc - gvl - gwi - gwr - gym - gyr - had - hag - hak - hap - hat - hau - hay - heb - heh - hif - hig - hil - hin - hlb - hlt - hne - hnn - hns - hoc - hoy - hrv - hsb - hto - hub - hui - hun - hus-dialect_centralveracruz - hus-dialect_westernpotosino - huu - huv - hvn - hwc - hye - hyw - iba - ibo - icr - idd - ifa - ifb - ife - ifk - ifu - ify - ign - ikk - ilb - ilo - imo - ina - inb - ind - iou - ipi - iqw - iri - irk - isl - ita - itl - itv - ixl-dialect_sangasparchajul - ixl-dialect_sanjuancotzal - ixl-dialect_santamarianebaj - izr - izz - jac - jam - jav - jbu - jen - jic - jiv - jmc - jmd - jpn - jun - juy - jvn - kaa - kab - kac - kak - kam - kan - kao - kaq - kat - kay - kaz - kbo - kbp - kbq - kbr - kby - kca - kcg - kdc - kde - kdh - kdi - kdj - kdl - kdn - kdt - kea - kek - ken - keo - ker - key - kez - kfb - kff-script_telugu - kfw - kfx - khg - khm - khq - kia - kij - kik - kin - kir - kjb - kje - kjg - kjh - kki - kkj - kle - klu - klv - klw - kma - kmd - kml - 
kmr-script_arabic - kmr-script_cyrillic - kmr-script_latin - kmu - knb - kne - knf - knj - knk - kno - kog - kor - kpq - kps - kpv - kpy - kpz - kqe - kqp - kqr - kqy - krc - kri - krj - krl - krr - krs - kru - ksb - ksr - kss - ktb - ktj - kub - kue - kum - kus - kvn - kvw - kwd - kwf - kwi - kxc - kxf - kxm - kxv - kyb - kyc - kyf - kyg - kyo - kyq - kyu - kyz - kzf - lac - laj - lam - lao - las - lat - lav - law - lbj - lbw - lcp - lee - lef - lem - lew - lex - lgg - lgl - lhu - lia - lid - lif - lin - lip - lis - lit - lje - ljp - llg - lln - lme - lnd - lns - lob - lok - lom - lon - loq - lsi - lsm - ltz - luc - lug - luo - lwo - lww - lzz - maa-dialect_sanantonio - maa-dialect_sanjeronimo - mad - mag - mah - mai - maj - mak - mal - mam-dialect_central - mam-dialect_northern - mam-dialect_southern - mam-dialect_western - maq - mar - maw - maz - mbb - mbc - mbh - mbj - mbt - mbu - mbz - mca - mcb - mcd - mco - mcp - mcq - mcu - mda - mdf - mdv - mdy - med - mee - mej - men - meq - met - mev - mfe - mfh - mfi - mfk - mfq - mfy - mfz - mgd - mge - mgh - mgo - mhi - mhr - mhu - mhx - mhy - mib - mie - mif - mih - mil - mim - min - mio - mip - miq - mit - miy - miz - mjl - mjv - mkd - mkl - mkn - mlg - mlt - mmg - mnb - mnf - mnk - mnw - mnx - moa - mog - mon - mop - mor - mos - mox - moz - mpg - mpm - mpp - mpx - mqb - mqf - mqj - mqn - mri - mrw - msy - mtd - mtj - mto - muh - mup - mur - muv - muy - mvp - mwq - mwv - mxb - mxq - mxt - mxv - mya - myb - myk - myl - myv - myx - myy - mza - mzi - mzj - mzk - mzm - mzw - nab - nag - nan - nas - naw - nca - nch - ncj - ncl - ncu - ndj - ndp - ndv - ndy - ndz - neb - new - nfa - nfr - nga - ngl - ngp - ngu - nhe - nhi - nhu - nhw - nhx - nhy - nia - nij - nim - nin - nko - nlc - nld - nlg - nlk - nmz - nnb - nno - nnq - nnw - noa - nob - nod - nog - not - npi - npl - npy - nso - nst - nsu - ntm - ntr - nuj - nus - nuz - nwb - nxq - nya - nyf - nyn - nyo - nyy - nzi - obo - oci - ojb-script_latin - ojb-script_syllabics - oku - old - omw - onb - ood - orm - ory - oss - ote - otq - ozm - pab - pad - pag - pam - pan - pao - pap - pau - pbb - pbc - pbi - pce - pcm - peg - pez - pib - pil - pir - pis - pjt - pkb - pls - plw - pmf - pny - poh-dialect_eastern - poh-dialect_western - poi - pol - por - poy - ppk - pps - prf - prk - prt - pse - pss - ptu - pui - pus - pwg - pww - pxm - qub - quc-dialect_central - quc-dialect_east - quc-dialect_north - quf - quh - qul - quw - quy - quz - qvc - qve - qvh - qvm - qvn - qvo - qvs - qvw - qvz - qwh - qxh - qxl - qxn - qxo - qxr - rah - rai - rap - rav - raw - rej - rel - rgu - rhg - rif-script_arabic - rif-script_latin - ril - rim - rjs - rkt - rmc-script_cyrillic - rmc-script_latin - rmo - rmy-script_cyrillic - rmy-script_latin - rng - rnl - roh-dialect_sursilv - roh-dialect_vallader - rol - ron - rop - rro - rub - ruf - rug - run - rus - sab - sag - sah - saj - saq - sas - sat - sba - sbd - sbl - sbp - sch - sck - sda - sea - seh - ses - sey - sgb - sgj - sgw - shi - shk - shn - sho - shp - sid - sig - sil - sja - sjm - sld - slk - slu - slv - sml - smo - sna - snd - sne - snn - snp - snw - som - soy - spa - spp - spy - sqi - sri - srm - srn - srp-script_cyrillic - srp-script_latin - srx - stn - stp - suc - suk - sun - sur - sus - suv - suz - swe - swh - sxb - sxn - sya - syl - sza - tac - taj - tam - tao - tap - taq - tat - tav - tbc - tbg - tbk - tbl - tby - tbz - tca - tcc - tcs - tcz - tdj - ted - tee - tel - tem - teo - ter - tes - tew - tex - tfr - tgj - tgk - tgl - tgo - tgp - tha - thk - thl - tih 
- tik - tir - tkr - tlb - tlj - tly - tmc - tmf - tna - tng - tnk - tnn - tnp - tnr - tnt - tob - toc - toh - tom - tos - tpi - tpm - tpp - tpt - trc - tri - trn - trs - tso - tsz - ttc - tte - ttq-script_tifinagh - tue - tuf - tuk-script_arabic - tuk-script_latin - tuo - tur - tvw - twb - twe - twu - txa - txq - txu - tye - tzh-dialect_bachajon - tzh-dialect_tenejapa - tzj-dialect_eastern - tzj-dialect_western - tzo-dialect_chamula - tzo-dialect_chenalho - ubl - ubu - udm - udu - uig-script_arabic - uig-script_cyrillic - ukr - umb - unr - upv - ura - urb - urd-script_arabic - urd-script_devanagari - urd-script_latin - urk - urt - ury - usp - uzb-script_cyrillic - uzb-script_latin - vag - vid - vie - vif - vmw - vmy - vot - vun - vut - wal-script_ethiopic - wal-script_latin - wap - war - waw - way - wba - wlo - wlx - wmw - wob - wol - wsg - wwa - xal - xdy - xed - xer - xho - xmm - xnj - xnr - xog - xon - xrb - xsb - xsm - xsr - xsu - xta - xtd - xte - xtm - xtn - xua - xuo - yaa - yad - yal - yam - yao - yas - yat - yaz - yba - ybb - ycl - ycn - yea - yka - yli - yor - yre - yua - yue-script_traditional - yuz - yva - zaa - zab - zac - zad - zae - zai - zam - zao - zaq - zar - zas - zav - zaw - zca - zga - zim - ziw - zlm - zmz - zne - zos - zpc - zpg - zpi - zpl - zpm - zpo - zpt - zpu - zpz - ztq - zty - zul - zyb - zyp - zza </details> ## Model details - **Developed by:** Vineel Pratap et al. - **Model type:** Multi-Lingual Automatic Speech Recognition model - **Language(s):** 1000+ languages, see [supported languages](#supported-languages) - **License:** CC-BY-NC 4.0 license - **Num parameters**: 1 billion - **Audio sampling rate**: 16,000 kHz - **Cite as:** @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ## Additional Links - [Blog post](https://ai.facebook.com/blog/multilingual-model-speech-recognition/) - [Transformers documentation](https://huggingface.co/docs/transformers/main/en/model_doc/mms). - [Paper](https://arxiv.org/abs/2305.13516) - [GitHub Repository](https://github.com/facebookresearch/fairseq/tree/main/examples/mms#asr) - [Other **MMS** checkpoints](https://huggingface.co/models?other=mms) - MMS base checkpoints: - [facebook/mms-1b](https://huggingface.co/facebook/mms-1b) - [facebook/mms-300m](https://huggingface.co/facebook/mms-300m) - [Official Space](https://huggingface.co/spaces/facebook/MMS)
microsoft/table-transformer-structure-recognition
microsoft
"2023-09-06T14:50:49Z"
428,214
133
transformers
[ "transformers", "pytorch", "safetensors", "table-transformer", "object-detection", "arxiv:2110.00061", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
object-detection
"2022-10-14T09:19:57Z"
--- license: mit widget: - src: https://documentation.tricentis.com/tosca/1420/en/content/tbox/images/table.png example_title: Table --- # Table Transformer (fine-tuned for Table Structure Recognition) Table Transformer (DETR) model trained on PubTables1M. It was introduced in the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Smock et al. and first released in [this repository](https://github.com/microsoft/table-transformer). Disclaimer: The team releasing Table Transformer did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Table Transformer is equivalent to [DETR](https://huggingface.co/docs/transformers/model_doc/detr), a Transformer-based object detection model. Note that the authors decided to use the "normalize before" setting of DETR, which means that layernorm is applied before self- and cross-attention. ## Usage You can use the raw model for detecting the structure (like rows, columns) in tables. See the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/table-transformer) for more info.
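As a rough usage sketch (not from the original card), structure recognition on a cropped table image might look like the following; `table.png` is a placeholder path, the 0.7 confidence threshold is an arbitrary choice, and it assumes the repository provides an image processor config:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

image = Image.open("table.png").convert("RGB")  # a cropped image of a single table (placeholder)

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-structure-recognition")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw DETR-style outputs into labelled boxes (rows, columns, spanning cells, ...)
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.7, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```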
h2oai/h2ogpt-4096-llama2-7b-chat
h2oai
"2023-08-24T18:35:05Z"
427,904
15
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "facebook", "meta", "llama-2", "h2ogpt", "en", "license:llama2", "autotrain_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-08-09T17:18:42Z"
--- inference: false language: - en license: llama2 model_type: llama pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 - h2ogpt --- h2oGPT clone of [Meta's Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). Try it live on our [h2oGPT demo](https://gpt.h2o.ai) with side-by-side LLM comparisons and private document chat! See how it compares to other models on our [LLM Leaderboard](https://evalgpt.ai/)! See more at [H2O.ai](https://h2o.ai/) ## Model Architecture ``` LlamaForCausalLM( (model): LlamaModel( (embed_tokens): Embedding(32000, 4096, padding_idx=0) (layers): ModuleList( (0-31): 32 x LlamaDecoderLayer( (self_attn): LlamaAttention( (q_proj): Linear(in_features=4096, out_features=4096, bias=False) (k_proj): Linear(in_features=4096, out_features=4096, bias=False) (v_proj): Linear(in_features=4096, out_features=4096, bias=False) (o_proj): Linear(in_features=4096, out_features=4096, bias=False) (rotary_emb): LlamaRotaryEmbedding() ) (mlp): LlamaMLP( (gate_proj): Linear(in_features=4096, out_features=11008, bias=False) (up_proj): Linear(in_features=4096, out_features=11008, bias=False) (down_proj): Linear(in_features=11008, out_features=4096, bias=False) (act_fn): SiLUActivation() ) (input_layernorm): LlamaRMSNorm() (post_attention_layernorm): LlamaRMSNorm() ) ) (norm): LlamaRMSNorm() ) (lm_head): Linear(in_features=4096, out_features=32000, bias=False) ) ```
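## Usage

A minimal text-generation sketch (not part of the original card); the Llama 2 chat prompt format and the sampling settings below are assumptions, not something the card specifies:

```python
import torch
from transformers import pipeline

generate = pipeline(
    "text-generation",
    model="h2oai/h2ogpt-4096-llama2-7b-chat",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Assumed Llama 2 chat prompt format
prompt = "<s>[INST] Why is drinking water so healthy? [/INST]"

res = generate(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, return_full_text=False)
print(res[0]["generated_text"])
```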
jackaduma/SecBERT
jackaduma
"2023-06-26T05:54:48Z"
422,940
29
transformers
[ "transformers", "pytorch", "safetensors", "bert", "fill-mask", "exbert", "security", "cybersecurity", "cyber security", "threat hunting", "threat intelligence", "en", "dataset:APTnotes", "dataset:Stucco-Data", "dataset:CASIE", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: en
thumbnail: https://github.com/jackaduma
tags:
- exbert
- security
- cybersecurity
- cyber security
- threat hunting
- threat intelligence
license: apache-2.0
datasets:
- APTnotes
- Stucco-Data
- CASIE
---

# SecBERT

This is the pretrained model presented in [SecBERT: A Pretrained Language Model for Cyber Security Text](https://github.com/jackaduma/SecBERT/), which is a BERT model trained on cyber security text.

The training corpus consists of papers taken from

* [APTnotes](https://github.com/kbandla/APTnotes)
* [Stucco-Data: Cyber security data sources](https://stucco.github.io/data/)
* [CASIE: Extracting Cybersecurity Event Information from Text](https://ebiquity.umbc.edu/_file_directory_/papers/943.pdf)
* [SemEval-2018 Task 8: Semantic Extraction from CybersecUrity REports using Natural Language Processing (SecureNLP)](https://competitions.codalab.org/competitions/17262).

SecBERT has its own wordpiece vocabulary (secvocab) that's built to best match the training corpus.

We trained [SecBERT](https://huggingface.co/jackaduma/SecBERT) and [SecRoBERTa](https://huggingface.co/jackaduma/SecRoBERTa) versions.

Available models include:

* [`SecBERT`](https://huggingface.co/jackaduma/SecBERT)
* [`SecRoBERTa`](https://huggingface.co/jackaduma/SecRoBERTa)

---

## **Fill Mask**

We propose building a language model that works on cyber security text; as a result, it can improve downstream tasks (NER, text classification, semantic understanding, Q&A) in the cyber security domain.

First, the image below compares the fill-mask pipeline of Google BERT, [AllenAI SciBert](https://github.com/allenai/scibert) and our [SecBERT](https://github.com/jackaduma/SecBERT).

<!-- <img src="./fill-mask-result.png" width="150%" height="150%"> -->

![fill-mask-result](https://github.com/jackaduma/SecBERT/blob/main/fill-mask-result.png?raw=true)

---

The original repo can be found [here](https://github.com/jackaduma/SecBERT).
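## Usage

A minimal fill-mask sketch (not part of the original card); the example sentence is made up for illustration:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="jackaduma/SecBERT", tokenizer="jackaduma/SecBERT")

# Any cyber-security sentence containing one [MASK] token works here.
for prediction in fill_mask("The attackers used a phishing [MASK] to gain initial access."):
    print(f"{prediction['token_str']:>15}  {prediction['score']:.3f}")
```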
oliverguhr/fullstop-punctuation-multilang-large
oliverguhr
"2023-11-16T09:35:35Z"
422,837
119
transformers
[ "transformers", "pytorch", "tf", "onnx", "safetensors", "xlm-roberta", "token-classification", "punctuation prediction", "punctuation", "en", "de", "fr", "it", "multilingual", "dataset:wmt/europarl", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:05Z"
--- language: - en - de - fr - it - multilingual tags: - punctuation prediction - punctuation datasets: wmt/europarl license: mit widget: - text: "Ho sentito che ti sei laureata il che mi fa molto piacere" example_title: "Italian" - text: "Tous les matins vers quatre heures mon père ouvrait la porte de ma chambre" example_title: "French" - text: "Ist das eine Frage Frau Müller" example_title: "German" - text: "Yet she blushed as if with guilt when Cynthia reading her thoughts said to her one day Molly you're very glad to get rid of us are not you" example_title: "English" metrics: - f1 --- This model predicts the punctuation of English, Italian, French and German texts. We developed it to restore the punctuation of transcribed spoken language. This multilanguage model was trained on the [Europarl Dataset](https://huggingface.co/datasets/wmt/europarl) provided by the [SEPP-NLG Shared Task](https://sites.google.com/view/sentence-segmentation). *Please note that this dataset consists of political speeches. Therefore the model might perform differently on texts from other domains.* The model restores the following punctuation markers: **"." "," "?" "-" ":"** ## Sample Code We provide a simple python package that allows you to process text of any length. ## Install To get started install the package from [pypi](https://pypi.org/project/deepmultilingualpunctuation/): ```bash pip install deepmultilingualpunctuation ``` ### Restore Punctuation ```python from deepmultilingualpunctuation import PunctuationModel model = PunctuationModel() text = "My name is Clara and I live in Berkeley California Ist das eine Frage Frau Müller" result = model.restore_punctuation(text) print(result) ``` **output** > My name is Clara and I live in Berkeley, California. Ist das eine Frage, Frau Müller? ### Predict Labels ```python from deepmultilingualpunctuation import PunctuationModel model = PunctuationModel() text = "My name is Clara and I live in Berkeley California Ist das eine Frage Frau Müller" clean_text = model.preprocess(text) labled_words = model.predict(clean_text) print(labled_words) ``` **output** > [['My', '0', 0.9999887], ['name', '0', 0.99998665], ['is', '0', 0.9998579], ['Clara', '0', 0.6752215], ['and', '0', 0.99990904], ['I', '0', 0.9999877], ['live', '0', 0.9999839], ['in', '0', 0.9999515], ['Berkeley', ',', 0.99800044], ['California', '.', 0.99534047], ['Ist', '0', 0.99998784], ['das', '0', 0.99999154], ['eine', '0', 0.9999918], ['Frage', ',', 0.99622655], ['Frau', '0', 0.9999889], ['Müller', '?', 0.99863917]] ## Results The performance differs for the single punctuation markers as hyphens and colons, in many cases, are optional and can be substituted by either a comma or a full stop. The model achieves the following F1 scores for the different languages: | Label | EN | DE | FR | IT | | ------------- | ----- | ----- | ----- | ----- | | 0 | 0.991 | 0.997 | 0.992 | 0.989 | | . | 0.948 | 0.961 | 0.945 | 0.942 | | ? 
| 0.890 | 0.893 | 0.871 | 0.832 |
| ,             | 0.819 | 0.945 | 0.831 | 0.798 |
| :             | 0.575 | 0.652 | 0.620 | 0.588 |
| -             | 0.425 | 0.435 | 0.431 | 0.421 |
| macro average | 0.775 | 0.814 | 0.782 | 0.762 |

## Languages

### Models

| Languages                                  | Model                                                        |
| ------------------------------------------ | ------------------------------------------------------------ |
| English, Italian, French and German        | [oliverguhr/fullstop-punctuation-multilang-large](https://huggingface.co/oliverguhr/fullstop-punctuation-multilang-large) |
| English, Italian, French, German and Dutch | [oliverguhr/fullstop-punctuation-multilingual-sonar-base](https://huggingface.co/oliverguhr/fullstop-punctuation-multilingual-sonar-base) |
| Dutch                                      | [oliverguhr/fullstop-dutch-sonar-punctuation-prediction](https://huggingface.co/oliverguhr/fullstop-dutch-sonar-punctuation-prediction) |

### Community Models

| Languages                                  | Model                                                        |
| ------------------------------------------ | ------------------------------------------------------------ |
| English, German, French, Spanish, Bulgarian, Italian, Polish, Dutch, Czech, Portuguese, Slovak, Slovenian | [kredor/punctuate-all](https://huggingface.co/kredor/punctuate-all) |
| Catalan                                    | [softcatala/fullstop-catalan-punctuation-prediction](https://huggingface.co/softcatala/fullstop-catalan-punctuation-prediction) |
| Welsh                                      | [techiaith/fullstop-welsh-punctuation-prediction](https://huggingface.co/techiaith/fullstop-welsh-punctuation-prediction) |

You can use different models by setting the model parameter:

```python
model = PunctuationModel(model = "oliverguhr/fullstop-dutch-punctuation-prediction")
```

## Where do I find the code and can I train my own model?

Yes you can! For the complete code of the research project take a look at [this repository](https://github.com/oliverguhr/fullstop-deep-punctuation-prediction).

There is also a guide on [how to fine tune this model for your data / language](https://github.com/oliverguhr/fullstop-deep-punctuation-prediction/blob/main/other_languages/readme.md).

## References

```
@article{guhr-EtAl:2021:fullstop,
  title={FullStop: Multilingual Deep Models for Punctuation Prediction},
  author = {Guhr, Oliver and Schumann, Anne-Kathrin and Bahrmann, Frank and Böhme, Hans Joachim},
  booktitle = {Proceedings of the Swiss Text Analytics Conference 2021},
  month = {June},
  year = {2021},
  address = {Winterthur, Switzerland},
  publisher = {CEUR Workshop Proceedings},
  url = {http://ceur-ws.org/Vol-2957/sepp_paper4.pdf}
}
```
microsoft/wavlm-large
microsoft
"2022-02-02T21:21:50Z"
412,519
41
transformers
[ "transformers", "pytorch", "wavlm", "feature-extraction", "speech", "en", "arxiv:1912.07875", "arxiv:2106.06909", "arxiv:2101.00390", "arxiv:2110.13900", "has_space", "region:us" ]
feature-extraction
"2022-03-02T23:29:05Z"
---
language:
- en
tags:
- speech
inference: false
---

# WavLM-Large

[Microsoft's WavLM](https://github.com/microsoft/unilm/tree/master/wavlm)

The large model pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz.

**Note**: This model does not have a tokenizer as it was pretrained on audio alone. In order to use this model for **speech recognition**, a tokenizer should be created and the model should be fine-tuned on labeled text data. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for a more detailed explanation of how to fine-tune the model.

The model was pre-trained on:

- 60,000 hours of [Libri-Light](https://arxiv.org/abs/1912.07875)
- 10,000 hours of [GigaSpeech](https://arxiv.org/abs/2106.06909)
- 24,000 hours of [VoxPopuli](https://arxiv.org/abs/2101.00390)

[Paper: WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900)

Authors: Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei

**Abstract**
*Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisely and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.*

The original model can be found under https://github.com/microsoft/unilm/tree/master/wavlm.

# Usage

This is an English pre-trained speech model that has to be fine-tuned on a downstream task like speech recognition or audio classification before it can be used in inference. The model was pre-trained in English and should therefore perform well only in English. The model has been shown to work well on the [SUPERB benchmark](https://superbbenchmark.org/).

**Note**: The model was pre-trained on phonemes rather than characters. This means that one should make sure that the input text is converted to a sequence of phonemes before fine-tuning.

## Speech Recognition

To fine-tune the model for speech recognition, see [the official speech recognition example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/speech-recognition).

## Speech Classification

To fine-tune the model for speech classification, see [the official audio classification example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/audio-classification).
## Speaker Verification TODO ## Speaker Diarization TODO # Contribution The model was contributed by [cywang](https://huggingface.co/cywang) and [patrickvonplaten](https://huggingface.co/patrickvonplaten). # License The official license can be found [here](https://github.com/microsoft/UniSpeech/blob/main/LICENSE) ![design](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/wavlm.png)
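The speaker verification and diarization sections above are placeholders. For orientation only, here is a hedged sketch of speaker verification with a fine-tuned X-vector head; the checkpoint name `microsoft/wavlm-base-plus-sv` and the use of cosine similarity as the verification score are assumptions for illustration and are not part of this repository.

```python
# Hedged sketch: speaker verification with a fine-tuned WavLM X-vector head.
# Assumptions: the "microsoft/wavlm-base-plus-sv" checkpoint and cosine
# similarity between utterance embeddings as the verification score.
import numpy as np
import torch
from transformers import AutoFeatureExtractor, WavLMForXVector

feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sv")
model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv")

# Two dummy 16 kHz waveforms (replace with real enrolment and test audio).
audio_1 = np.zeros(16000, dtype=np.float32)
audio_2 = np.zeros(16000, dtype=np.float32)

inputs = feature_extractor([audio_1, audio_2], sampling_rate=16000, return_tensors="pt", padding=True)
with torch.no_grad():
    embeddings = model(**inputs).embeddings
embeddings = torch.nn.functional.normalize(embeddings, dim=-1)

# Scores close to 1.0 suggest the two utterances come from the same speaker.
similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
print(float(similarity))
```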
distil-whisper/distil-medium.en
distil-whisper
"2024-03-25T12:07:23Z"
411,323
102
transformers
[ "transformers", "pytorch", "jax", "tensorboard", "onnx", "safetensors", "whisper", "automatic-speech-recognition", "audio", "transformers.js", "en", "arxiv:2311.00430", "arxiv:2210.13352", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
"2023-10-24T15:49:07Z"
--- language: - en tags: - audio - automatic-speech-recognition - transformers.js widget: - example_title: LibriSpeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: LibriSpeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac pipeline_tag: automatic-speech-recognition license: mit library_name: transformers --- # Distil-Whisper: distil-medium.en Distil-Whisper was proposed in the paper [Robust Knowledge Distillation via Large-Scale Pseudo Labelling](https://arxiv.org/abs/2311.00430). It is a distilled version of the Whisper model that is **6 times faster**, 49% smaller, and performs **within 1% WER** on out-of-distribution evaluation sets. This is the repository for distil-medium.en, a distilled variant of [Whisper medium.en](https://huggingface.co/openai/whisper-medium.en). | Model | Params / M | Rel. Latency ↑ | Short-Form WER ↓ | Long-Form WER ↓ | |----------------------------------------------------------------------------|------------|----------------|------------------|-----------------| | [large-v3](https://huggingface.co/openai/whisper-large-v3) | 1550 | 1.0 | **8.4** | 11.0 | | [large-v2](https://huggingface.co/openai/whisper-large-v2) | 1550 | 1.0 | 9.1 | 11.7 | | | | | | | | [distil-large-v3](https://huggingface.co/distil-whisper/distil-large-v3) | 756 | 6.3 | 9.7 | **10.8** | | [distil-large-v2](https://huggingface.co/distil-whisper/distil-large-v2) | 756 | 5.8 | 10.1 | 11.6 | | [distil-medium.en](https://huggingface.co/distil-whisper/distil-medium.en) | 394 | **6.8** | 11.1 | 12.4 | | [distil-small.en](https://huggingface.co/distil-whisper/distil-small.en) | **166** | 5.6 | 12.1 | 12.8 | **Note:** Distil-Whisper is currently only available for English speech recognition. We are working with the community to distill Whisper on other languages. If you are interested in distilling Whisper in your language, check out the provided [training code](https://github.com/huggingface/distil-whisper/tree/main/training). We will update the [Distil-Whisper repository](https://github.com/huggingface/distil-whisper/) with multilingual checkpoints when ready! ## Usage Distil-Whisper is supported in Hugging Face 🤗 Transformers from version 4.35 onwards. To run the model, first install the latest version of the Transformers library. 
For this example, we'll also install 🤗 Datasets to load toy audio dataset from the Hugging Face Hub: ```bash pip install --upgrade pip pip install --upgrade transformers accelerate datasets[audio] ``` ### Short-Form Transcription The model can be used with the [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) class to transcribe short-form audio files (< 30-seconds) as follows: ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline from datasets import load_dataset device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "distil-whisper/distil-medium.en" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, torch_dtype=torch_dtype, device=device, ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` To transcribe a local audio file, simply pass the path to your audio file when you call the pipeline: ```diff - result = pipe(sample) + result = pipe("audio.mp3") ``` ### Long-Form Transcription Distil-Whisper uses a chunked algorithm to transcribe long-form audio files (> 30-seconds). In practice, this chunked long-form algorithm is 9x faster than the sequential algorithm proposed by OpenAI in the Whisper paper (see Table 7 of the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430)). To enable chunking, pass the `chunk_length_s` parameter to the `pipeline`. For Distil-Whisper, a chunk length of 15-seconds is optimal. To activate batching, pass the argument `batch_size`: ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline from datasets import load_dataset device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "distil-whisper/distil-medium.en" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device, ) dataset = load_dataset("distil-whisper/librispeech_long", "default", split="validation") sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` <!--- **Tip:** The pipeline can also be used to transcribe an audio file from a remote URL, for example: ```python result = pipe("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav") ``` ---> ### Speculative Decoding Distil-Whisper can be used as an assistant model to Whisper for [speculative decoding](https://huggingface.co/blog/whisper-speculative-decoding). Speculative decoding mathematically ensures the exact same outputs as Whisper are obtained while being 2 times faster. 
This makes it the perfect drop-in replacement for existing Whisper pipelines, since the same outputs are guaranteed. In the following code snippet, we first load the Distil-Whisper assistant model standalone, and then specify it as the "assistant model" to the main Whisper pipeline for generation: ```python from transformers import pipeline, AutoModelForCausalLM, AutoModelForSpeechSeq2Seq, AutoProcessor import torch from datasets import load_dataset device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 assistant_model_id = "distil-whisper/distil-medium.en" assistant_model = AutoModelForCausalLM.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) assistant_model.to(device) model_id = "openai/whisper-medium.en" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, generate_kwargs={"assistant_model": assistant_model}, torch_dtype=torch_dtype, device=device, ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` ## Additional Speed & Memory Improvements You can apply additional speed and memory improvements to Distil-Whisper which we cover in the following sections. ### Flash Attention We recommend using [Flash-Attention 2](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#flashattention-2) if your GPU allows for it. To do so, you first need to install [Flash Attention](https://github.com/Dao-AILab/flash-attention): ``` pip install flash-attn --no-build-isolation ``` and then all you have to do is pass `use_flash_attention_2=True` to `from_pretrained`: ```diff - model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True) + model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=True) ``` ### Torch Scaled Dot-Product Attention (SDPA) If your GPU does not support Flash Attention, we recommend making use of [BetterTransformers](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#bettertransformer). 
To do so, you first need to install optimum: ``` pip install --upgrade optimum ``` And then convert your model to a "BetterTransformer" model before using it: ```diff model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True) + model = model.to_bettertransformer() ``` ### Running Distil-Whisper in `openai-whisper` To use the model in the original Whisper format, first ensure you have the [`openai-whisper`](https://pypi.org/project/openai-whisper/) package installed: ```bash pip install --upgrade openai-whisper ``` The following code-snippet demonstrates how to transcribe a sample file from the LibriSpeech dataset loaded using 🤗 Datasets: ```python import torch from datasets import load_dataset from huggingface_hub import hf_hub_download from whisper import load_model, transcribe medium_en = hf_hub_download(repo_id="distil-whisper/distil-medium.en", filename="original-model.bin") model = load_model(medium_en) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"]["array"] sample = torch.from_numpy(sample).float() pred_out = transcribe(model, audio=sample) print(pred_out["text"]) ``` To transcribe a local audio file, simply pass the path to the audio file as the `audio` argument to transcribe: ```python pred_out = transcribe(model, audio="audio.mp3") ``` ### Whisper.cpp Distil-Whisper can be run from the [Whisper.cpp](https://github.com/ggerganov/whisper.cpp) repository with the original sequential long-form transcription algorithm. In a [provisional benchmark](https://github.com/ggerganov/whisper.cpp/pull/1424#issuecomment-1793513399) on Mac M1, `distil-medium.en` is 4x faster than `large-v2`, while performing to within 1% WER over long-form audio. Steps for getting started: 1. Clone the Whisper.cpp repository: ``` git clone https://github.com/ggerganov/whisper.cpp.git cd whisper.cpp ``` 2. Download the ggml weights for `distil-medium.en` from the Hugging Face Hub: ```bash python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='distil-whisper/distil-medium.en', filename='ggml-medium-32-2.en.bin', local_dir='./models')" ``` Note that if you do not have the `huggingface_hub` package installed, you can also download the weights with `wget`: ```bash wget https://huggingface.co/distil-whisper/distil-medium.en/resolve/main/ggml-medium-32-2.en.bin -P ./models ``` 3. Run inference using the provided sample audio: ```bash make -j && ./main -m models/ggml-medium-32-2.en.bin -f samples/jfk.wav ``` ### Transformers.js ```js import { pipeline } from '@xenova/transformers'; let transcriber = await pipeline('automatic-speech-recognition', 'distil-whisper/distil-medium.en'); let url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; let output = await transcriber(url); // { text: " And so my fellow Americans, ask not what your country can do for you. Ask what you can do for your country." } ``` See the [docs](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.AutomaticSpeechRecognitionPipeline) for more information. 
### Candle Through an integration with Hugging Face [Candle](https://github.com/huggingface/candle/tree/main) 🕯️, Distil-Whisper is now available in the Rust library 🦀. Benefit from: * Optimised CPU backend with optional MKL support for x86 and Accelerate for Macs * CUDA backend for efficiently running on GPUs, multiple GPU distribution via NCCL * WASM support: run Distil-Whisper in a browser Steps for getting started: 1. Install [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as explained [here](https://huggingface.github.io/candle/guide/installation.html) 2. Clone the `candle` repository locally: ``` git clone https://github.com/huggingface/candle.git ``` 3. Enter the example directory for [Whisper](https://github.com/huggingface/candle/tree/main/candle-examples/examples/whisper): ``` cd candle/candle-examples/examples/whisper ``` 4. Run an example: ``` cargo run --example whisper --release -- --model distil-medium.en ``` 5. To specify your own audio file, add the `--input` flag: ``` cargo run --example whisper --release -- --model distil-medium.en --input audio.wav ``` ### 8bit & 4bit Quantization Coming soon ... ## Model Details Distil-Whisper inherits the encoder-decoder architecture from Whisper. The encoder maps a sequence of speech vector inputs to a sequence of hidden-state vectors. The decoder auto-regressively predicts text tokens, conditional on all previous tokens and the encoder hidden-states. Consequently, the encoder is only run forward once, whereas the decoder is run as many times as the number of tokens generated. In practice, this means the decoder accounts for over 90% of total inference time. Thus, to optimise for latency, the focus should be on minimising the inference time of the decoder. To distill the Whisper model, we reduce the number of decoder layers while keeping the encoder fixed. The encoder (shown in green) is entirely copied from the teacher to the student and frozen during training. The student's decoder consists of only two decoder layers, which are initialised from the first and last decoder layer of the teacher (shown in red). All other decoder layers of the teacher are discarded. The model is then trained on a weighted sum of the KL divergence and pseudo-label loss terms. <p align="center"> <img src="https://huggingface.co/datasets/distil-whisper/figures/resolve/main/architecture.png?raw=true" width="600"/> </p> ## Evaluation The following code snippet demonstrates how to evaluate the Distil-Whisper model on the LibriSpeech validation.clean dataset with [streaming mode](https://huggingface.co/blog/audio-datasets#streaming-mode-the-silver-bullet), meaning no audio data has to be downloaded to your local device. 
First, we need to install the required packages, including 🤗 Datasets to stream and load the audio data, and 🤗 Evaluate to perform the WER calculation: ```bash pip install --upgrade pip pip install --upgrade transformers datasets[audio] evaluate jiwer ``` Evaluation can then be run end-to-end with the following example: ```python from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from datasets import load_dataset from evaluate import load import torch from tqdm import tqdm # define our torch configuration device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "distil-whisper/distil-medium.en" # load the model + processor model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, use_safetensors=True, low_cpu_mem_usage=True) model = model.to(device) processor = AutoProcessor.from_pretrained(model_id) # load the dataset with streaming mode dataset = load_dataset("librispeech_asr", "clean", split="validation", streaming=True) # define the evaluation metric wer_metric = load("wer") normalizer = EnglishTextNormalizer(processor.tokenizer.english_spelling_normalizer) def inference(batch): # 1. Pre-process the audio data to log-mel spectrogram inputs audio = [sample["array"] for sample in batch["audio"]] input_features = processor(audio, sampling_rate=batch["audio"][0]["sampling_rate"], return_tensors="pt").input_features input_features = input_features.to(device, dtype=torch_dtype) # 2. Auto-regressively generate the predicted token ids pred_ids = model.generate(input_features, max_new_tokens=128) # 3. Decode the token ids to the final transcription batch["transcription"] = processor.batch_decode(pred_ids, skip_special_tokens=True) batch["reference"] = batch["text"] return batch dataset = dataset.map(function=inference, batched=True, batch_size=16) all_transcriptions = [] all_references = [] # iterate over the dataset and run inference for i, result in tqdm(enumerate(dataset), desc="Evaluating..."): all_transcriptions.append(result["transcription"]) all_references.append(result["reference"]) # normalize predictions and references all_transcriptions = [normalizer(transcription) for transcription in all_transcriptions] all_references = [normalizer(reference) for reference in all_references] # compute the WER metric wer = 100 * wer_metric.compute(predictions=all_transcriptions, references=all_references) print(wer) ``` **Print Output:** ``` 3.593196832001168 ``` ## Intended Use Distil-Whisper is intended to be a drop-in replacement for Whisper on English speech recognition. In particular, it achieves comparable WER results over out-of-distribution test data, while being 6x faster over both short and long-form audio. 
## Data Distil-Whisper is trained on 22,000 hours of audio data from 9 open-source, permissively licensed speech datasets on the Hugging Face Hub: | Dataset | Size / h | Speakers | Domain | Licence | |-----------------------------------------------------------------------------------------|----------|----------|-----------------------------|-----------------| | [People's Speech](https://huggingface.co/datasets/MLCommons/peoples_speech) | 12,000 | unknown | Internet Archive | CC-BY-SA-4.0 | | [Common Voice 13](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) | 3,000 | unknown | Narrated Wikipedia | CC0-1.0 | | [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | 2,500 | unknown | Audiobook, podcast, YouTube | apache-2.0 | | Fisher | 1,960 | 11,900 | Telephone conversations | LDC | | [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) | 960 | 2,480 | Audiobooks | CC-BY-4.0 | | [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | 540 | 1,310 | European Parliament | CC0 | | [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | 450 | 2,030 | TED talks | CC-BY-NC-ND 3.0 | | SwitchBoard | 260 | 540 | Telephone conversations | LDC | | [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | 100 | unknown | Meetings | CC-BY-4.0 | |||||| | **Total** | 21,770 | 18,260+ | | | The combined dataset spans 10 distinct domains and over 50k speakers. The diversity of this dataset is crucial to ensuring the distilled model is robust to audio distributions and noise. The audio data is then pseudo-labelled using the Whisper large-v2 model: we use Whisper to generate predictions for all the audio in our training set and use these as the target labels during training. Using pseudo-labels ensures that the transcriptions are consistently formatted across datasets and provides sequence-level distillation signal during training. ## WER Filter The Whisper pseudo-label predictions are subject to mis-transcriptions and hallucinations. To ensure we only train on accurate pseudo-labels, we employ a simple WER heuristic during training. First, we normalise the Whisper pseudo-labels and the ground truth labels provided by each dataset. We then compute the WER between these labels. If the WER exceeds a specified threshold, we discard the training example. Otherwise, we keep it for training. Section 9.2 of the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430) demonstrates the effectiveness of this filter for improving downstream performance of the distilled model. We also partially attribute Distil-Whisper's robustness to hallucinations to this filter. ## Training The model was trained for 80,000 optimisation steps (or eight epochs). The Tensorboard training logs can be found under: https://huggingface.co/distil-whisper/distil-medium.en/tensorboard?params=scalars#frame ## Results The distilled model performs to within 1% WER of Whisper on out-of-distribution (OOD) short-form audio, and outperforms Whisper by 0.1% on OOD long-form audio. This performance gain is attributed to lower hallucinations. For a detailed per-dataset breakdown of the evaluation results, refer to Tables 16 and 17 of the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430) Distil-Whisper is also evaluated on the [ESB benchmark](https://arxiv.org/abs/2210.13352) datasets as part of the [OpenASR leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard), where it performs to within 0.2% WER of Whisper. 
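As a concrete illustration of the WER filter described above, the accept/reject rule for a single training example can be sketched as follows. This is only a hedged sketch: the threshold value and the exact normaliser configuration are assumptions for illustration and may differ from the configuration used in training.

```python
# Hedged sketch of the WER-based pseudo-label filter described in the "WER Filter" section.
# The threshold (10%) and normaliser setup are illustrative assumptions.
from transformers import WhisperTokenizer
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer
from evaluate import load

wer_metric = load("wer")
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-large-v2")
normalizer = EnglishTextNormalizer(tokenizer.english_spelling_normalizer)

def keep_example(ground_truth: str, pseudo_label: str, threshold: float = 10.0) -> bool:
    """Return True if the Whisper pseudo-label is close enough to the ground truth to train on."""
    reference = normalizer(ground_truth)
    prediction = normalizer(pseudo_label)
    if len(reference) == 0:
        return False
    wer = 100 * wer_metric.compute(references=[reference], predictions=[prediction])
    return wer <= threshold

# Example: a near-perfect pseudo-label is kept, a hallucinated one is discarded.
print(keep_example("the cat sat on the mat", "The cat sat on the mat."))  # True
print(keep_example("the cat sat on the mat", "thanks for watching, please subscribe"))  # False
```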
## Reproducing Distil-Whisper Training and evaluation code to reproduce Distil-Whisper is available under the Distil-Whisper repository: https://github.com/huggingface/distil-whisper/tree/main/training ## License Distil-Whisper inherits the [MIT license](https://github.com/huggingface/distil-whisper/blob/main/LICENSE) from OpenAI's Whisper model. ## Citation If you use this model, please consider citing the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430): ``` @misc{gandhi2023distilwhisper, title={Distil-Whisper: Robust Knowledge Distillation via Large-Scale Pseudo Labelling}, author={Sanchit Gandhi and Patrick von Platen and Alexander M. Rush}, year={2023}, eprint={2311.00430}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## Acknowledgements * OpenAI for the Whisper [model](https://huggingface.co/openai/whisper-large-v2) and [original codebase](https://github.com/openai/whisper) * Hugging Face 🤗 [Transformers](https://github.com/huggingface/transformers) for the model integration * Google's [TPU Research Cloud (TRC)](https://sites.research.google/trc/about/) programme for Cloud TPU v4s * [`@rsonavane`](https://huggingface.co/rsonavane/distil-whisper-large-v2-8-ls) for releasing an early iteration of Distil-Whisper on the LibriSpeech dataset
finiteautomata/beto-sentiment-analysis
finiteautomata
"2023-02-25T14:23:57Z"
410,396
23
transformers
[ "transformers", "pytorch", "jax", "bert", "text-classification", "sentiment-analysis", "es", "arxiv:2106.09462", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: - es tags: - sentiment-analysis --- # Sentiment Analysis in Spanish ## beto-sentiment-analysis **NOTE: this model will be removed soon -- use [pysentimiento/robertuito-sentiment-analysis](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis) instead** Repository: [https://github.com/pysentimiento/pysentimiento/](https://github.com/pysentimiento/pysentimiento/) Model trained on the TASS 2020 corpus (around 5k tweets) covering several dialects of Spanish. The base model is [BETO](https://github.com/dccuchile/beto), a BERT model trained on Spanish text. It uses the `POS`, `NEG`, and `NEU` labels. ## License `pysentimiento` is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. [TASS Dataset license](http://tass.sepln.org/tass_data/download.php) 2. SemEval 2017 Dataset license ## Citation If you use this model in your work, please cite the following papers: ``` @misc{perez2021pysentimiento, title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks}, author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque}, year={2021}, eprint={2106.09462}, archivePrefix={arXiv}, primaryClass={cs.CL} } @article{canete2020spanish, title={Spanish pre-trained bert model and evaluation data}, author={Ca{\~n}ete, Jos{\'e} and Chaperon, Gabriel and Fuentes, Rodrigo and Ho, Jou-Hui and Kang, Hojin and P{\'e}rez, Jorge}, journal={Pml4dc at iclr}, volume={2020}, number={2020}, pages={1--10}, year={2020} } ``` Enjoy! 🤗
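For reference, a minimal usage sketch with the 🤗 Transformers `pipeline` API is shown below; the output labels follow the `POS`/`NEG`/`NEU` scheme described above, and the example sentences are illustrative only.

```python
# Minimal sketch: Spanish sentiment analysis with the text-classification pipeline.
from transformers import pipeline

sentiment = pipeline("text-classification", model="finiteautomata/beto-sentiment-analysis")

print(sentiment("¡Qué gran día!"))      # likely label: POS
print(sentiment("Esto es terrible."))   # likely label: NEG
```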
runwayml/stable-diffusion-inpainting
runwayml
"2023-07-05T01:09:17Z"
410,151
1,456
diffusers
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "arxiv:2207.12598", "arxiv:2112.10752", "arxiv:2103.00020", "arxiv:2205.11487", "arxiv:1910.09700", "license:creativeml-openrail-m", "has_space", "diffusers:StableDiffusionInpaintPipeline", "region:us" ]
text-to-image
"2022-10-17T02:48:32Z"
--- license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image inference: false library_name: diffusers extra_gated_prompt: |- One more step before getting this model. This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content 2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license By clicking on "Access repository" below, you accept that your *contact information* (email address and username) can be shared with the model authors as well. extra_gated_fields: I have read the License and agree with its terms: checkbox --- Stable Diffusion Inpainting is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input, with the extra capability of inpainting the pictures by using a mask. The **Stable-Diffusion-Inpainting** was initialized with the weights of the [Stable-Diffusion-v-1-2](https://huggingface.co/CompVis/stable-diffusion-v-1-2-original). First 595k steps regular training, then 440k steps of inpainting training at resolution 512x512 on “laion-aesthetics v2 5+” and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). For inpainting, the UNet has 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself) whose weights were zero-initialized after restoring the non-inpainting checkpoint. During training, we generate synthetic masks and, in 25% of cases, mask everything. [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/runwayml/stable-diffusion-inpainting) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb) :-------------------------:|:-------------------------:| ## Examples: You can use this both with the [🧨Diffusers library](https://github.com/huggingface/diffusers) and the [RunwayML GitHub repository](https://github.com/runwayml/stable-diffusion). ### Diffusers ```python import torch from diffusers import StableDiffusionInpaintPipeline pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", revision="fp16", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "Face of a yellow cat, high resolution, sitting on a park bench" #image and mask_image should be PIL images. 
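# (Sketch) One way to obtain image and mask_image is to download the example pair
# shown in the "How it works" section below, e.g. via diffusers' image-loading helper:
from diffusers.utils import load_image
image = load_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png")
mask_image = load_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png")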
#The mask structure is white for inpainting and black for keeping as is image = pipe(prompt=prompt, image=image, mask_image=mask_image).images[0] image.save("./yellow_cat_on_park_bench.png") ``` **How it works:** `image` | `mask_image` :-------------------------:|:-------------------------:| <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="300"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="300"/> `prompt` | `Output` :-------------------------:|:-------------------------:| <span style="position: relative;bottom: 150px;">Face of a yellow cat, high resolution, sitting on a park bench</span> | <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/test.png" alt="drawing" width="300"/> ### Original GitHub Repository 1. Download the weights [sd-v1-5-inpainting.ckpt](https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt) 2. Follow instructions [here](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion). ## Model Details - **Developed by:** Robin Rombach, Patrick Esser - **Model type:** Diffusion-based text-to-image generation model - **Language(s):** English - **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based. - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487). - **Resources for more information:** [GitHub Repository](https://github.com/runwayml/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752). - **Cite as:** @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } # Uses ## Direct Use The model is intended for research purposes only. Possible research areas and tasks include - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. - Research on generative models. Excluded uses are described below. ### Misuse, Malicious Use, and Out-of-Scope Use _Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_. 
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. #### Out-of-Scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. #### Misuse and Malicious Use Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc. - Intentionally promoting or propagating discriminatory content or harmful stereotypes. - Impersonating individuals without their consent. - Sexual content without consent of the people who might see it. - Mis- and disinformation - Representations of egregious violence and gore - Sharing of copyrighted or licensed material in violation of its terms of use. - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use. ## Limitations and Bias ### Limitations - The model does not achieve perfect photorealism - The model cannot render legible text - The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere” - Faces and people in general may not be generated properly. - The model was trained mainly with English captions and will not work as well in other languages. - The autoencoding part of the model is lossy - The model was trained on a large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material and is not fit for product use without additional safety mechanisms and considerations. - No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data. The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images. ### Bias While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are primarily limited to English descriptions. Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. This affects the overall output of the model, as white and western cultures are often set as the default. Further, the ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts. ## Training **Training Data** The model developers used the following dataset for training the model: - LAION-2B (en) and subsets thereof (see next section) **Training Procedure** Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training, - Images are encoded through an encoder, which turns images into latent representations. 
The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 - Text prompts are encoded through a ViT-L/14 text-encoder. - The non-pooled output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention. - The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. We currently provide six checkpoints, `sd-v1-1.ckpt`, `sd-v1-2.ckpt`, `sd-v1-3.ckpt`, `sd-v1-4.ckpt`, `sd-v1-5.ckpt` and `sd-v1-5-inpainting.ckpt`, which were trained as follows: - `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en). 194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`). - `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`. 515k steps at resolution `512x512` on "laion-improved-aesthetics" (a subset of laion2B-en, filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)). - `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-improved-aesthetics" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). - `sd-v1-4.ckpt`: Resumed from `sd-v1-2.ckpt`. 225,000 steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). - `sd-v1-5.ckpt`: Resumed from `sd-v1-2.ckpt`. 595k steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve classifier-free guidance sampling. - `sd-v1-5-inpainting.ckpt`: Resumed from `sd-v1-2.ckpt`. 595k steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve classifier-free guidance sampling. Then 440k steps of inpainting training at resolution 512x512 on “laion-aesthetics v2 5+” and 10% dropping of the text-conditioning. For inpainting, the UNet has 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself) whose weights were zero-initialized after restoring the non-inpainting checkpoint. During training, we generate synthetic masks and, in 25% of cases, mask everything. - **Hardware:** 32 x 8 x A100 GPUs - **Optimizer:** AdamW - **Gradient Accumulations**: 2 - **Batch:** 32 x 8 x 2 x 4 = 2048 - **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant ## Evaluation Results Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling steps show the relative improvements of the checkpoints: ![pareto](https://huggingface.co/CompVis/stable-diffusion/resolve/main/v1-1-to-v1-5.png) Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores. ## Inpainting Evaluation To assess the performance of the inpainting model, we used the same evaluation protocol as in our [LDM paper](https://arxiv.org/abs/2112.10752). 
Since the Stable Diffusion Inpainting Model accepts a text input, we simply used a fixed prompt of `photograph of a beautiful empty scene, highest quality settings`. | Model | FID | LPIPS | |-----------------------------|------|------------------| | Stable Diffusion Inpainting | 1.00 | 0.141 (+- 0.082) | | Latent Diffusion Inpainting | 1.50 | 0.137 (+- 0.080) | | CoModGAN | 1.82 | 0.15 | | LaMa | 2.21 | 0.134 (+- 0.080) | ## Environmental Impact **Stable Diffusion v1** **Estimated Emissions** Based on the training information above, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact. - **Hardware Type:** A100 PCIe 40GB - **Hours used:** 150000 - **Cloud Provider:** AWS - **Compute Region:** US-east - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 11250 kg CO2 eq. ## Citation ```bibtex @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } ``` *This model card was written by: Robin Rombach and Patrick Esser and is based on the [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
google/gemma-2b-it
google
"2024-04-11T17:59:28Z"
408,335
482
transformers
[ "transformers", "safetensors", "gguf", "gemma", "text-generation", "conversational", "arxiv:2312.11805", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2107.03374", "arxiv:2108.07732", "arxiv:2110.14168", "arxiv:2304.06364", "arxiv:2206.04615", "arxiv:1804.06876", "arxiv:2110.08193", "arxiv:2009.11462", "arxiv:2101.11718", "arxiv:1804.09301", "arxiv:2109.07958", "arxiv:2203.09509", "license:gemma", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2024-02-08T13:23:59Z"
--- library_name: transformers widget: - messages: - role: user content: How does the brain work? inference: parameters: max_new_tokens: 200 extra_gated_heading: Access Gemma on Hugging Face extra_gated_prompt: >- To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license license: gemma --- # Gemma Model Card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs) This model card corresponds to the 2B instruct version of the Gemma model. You can also visit the model card of the [2B base model](https://huggingface.co/google/gemma-2b), [7B base model](https://huggingface.co/google/gemma-7b), and [7B instruct model](https://huggingface.co/google/gemma-7b-it). **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) * [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma) * [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335?version=gemma-2b-it-gg-hf) **Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent) **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your usecase. #### Running the model on a CPU As explained below, we recommend `torch.bfloat16` as the default dtype. You can use [a different precision](#precisions) if necessary. ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2b-it", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2b-it", device_map="auto", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` <a name="precisions"></a> #### Running the model on a GPU using different precisions The native weights of this model were exported in `bfloat16` precision. You can use `float16`, which may be faster on certain hardware, indicating the `torch_dtype` when loading the model. For convenience, the `float16` revision of the repo contains a copy of the weights already converted to that precision. You can also use `float32` if you skip the dtype, but no precision increase will occur (model weights will just be upcasted to `float32`). See examples below. * _Using `torch.float16`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2b-it", device_map="auto", torch_dtype=torch.float16, revision="float16", ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Upcasting to `torch.float32`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2b-it", device_map="auto" ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Quantized Versions through `bitsandbytes` * _Using 8-bit precision (int8)_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using 4-bit precision_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Other optimizations * _Flash Attention 2_ First make sure to install `flash-attn` in your environment `pip install flash-attn` ```diff model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, + attn_implementation="flash_attention_2" ).to(0) ``` ### Chat Template The instruction-tuned models use a chat template that must be adhered to for conversational use. The easiest way to apply it is using the tokenizer's built-in chat template, as shown in the following snippet. Let's load the model and apply the chat template to a conversation. 
In this example, we'll start with a single user interaction: ```py from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "google/gemma-2b-it" dtype = torch.bfloat16 tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype=dtype, ) chat = [ { "role": "user", "content": "Write a hello world program" }, ] prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) ``` At this point, the prompt contains the following text: ``` <bos><start_of_turn>user Write a hello world program<end_of_turn> <start_of_turn>model ``` As you can see, each turn is preceded by a `<start_of_turn>` delimiter and then the role of the entity (either `user`, for content supplied by the user, or `model` for LLM responses). Turns finish with the `<end_of_turn>` token. You can follow this format to build the prompt manually, if you need to do it without the tokenizer's chat template. After the prompt is ready, generation can be performed like this: ```py inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150) ``` ### Fine-tuning You can find some fine-tuning scripts under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples) of the [`google/gemma-7b`](https://huggingface.co/google/gemma-7b) repository. To adapt them to this model, simply change the model-id to `google/gemma-2b-it`. We provide: * A script to perform Supervised Fine-Tuning (SFT) on the UltraChat dataset using QLoRA * A script to perform SFT using FSDP on TPU devices * A notebook that you can run on a free-tier Google Colab instance to perform SFT on the English quotes dataset ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ## Model Data Data used for model training and how the data was processed. ### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources, totaling 6 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. * Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. 
* Additional methods: Filtering based on content quality and safety in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11). ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/). ### Software Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ml-pathways). JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is especially suitable for [foundation models](https://ai.google/discover/foundation-models/), including large language models like these. Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. 
### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 | | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot | 71.4 | 81.2 | | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 | | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 49.7 | 51.8 | | [BoolQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 | | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 | | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 | | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 | | [ARC-e](https://arxiv.org/abs/1911.01547) | | 73.2 | 81.5 | | [ARC-c](https://arxiv.org/abs/1911.01547) | | 42.1 | 53.2 | | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 | | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | 12.5 | 23 | | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 | | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 | | [GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 | | [MATH](https://arxiv.org/abs/2103.03874) | 4-shot | 11.8 | 24.3 | | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 | | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 | | ------------------------------ | ------------- | ----------- | --------- | | **Average** | | **45.0** | **56.9** | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2). * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. 
| Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [RealToxicity](https://arxiv.org/abs/2009.11462) | average | 6.86 | 7.90 | | [BOLD](https://arxiv.org/abs/2101.11718) | | 45.57 | 49.08 | | [CrowS-Pairs](https://aclanthology.org/2020.emnlp-main.154/) | top-1 | 45.82 | 51.33 | | [BBQ Ambig](https://arxiv.org/abs/2110.08193v2) | 1-shot, top-1 | 62.58 | 92.54 | | [BBQ Disambig](https://arxiv.org/abs/2110.08193v2) | top-1 | 54.62 | 71.99 | | [Winogender](https://arxiv.org/abs/1804.09301) | top-1 | 51.25 | 54.17 | | [TruthfulQA](https://arxiv.org/abs/2109.07958) | | 44.84 | 31.81 | | [Winobias 1_2](https://arxiv.org/abs/1804.06876) | | 56.12 | 59.09 | | [Winobias 2_2](https://arxiv.org/abs/1804.06876) | | 91.10 | 92.23 | | [Toxigen](https://arxiv.org/abs/2203.09509) | | 29.77 | 39.59 | | ------------------------------ | ------------- | ----------- | --------- | ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations. 
### Ethical Considerations and Risks

The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following:

* Bias and Fairness
  * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, with input data pre-processing described and posterior evaluations reported in this card.
* Misinformation and Misuse
  * LLMs can be misused to generate text that is false, misleading, or harmful.
  * Guidelines are provided for responsible use with the model; see the [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible).
* Transparency and Accountability
  * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes.
  * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem.

Risks identified and mitigations:

* Perpetuation of biases: Continuous monitoring (using evaluation metrics and human review) and the exploration of de-biasing techniques are encouraged during model training, fine-tuning, and other use cases.
* Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases.
* Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).
* Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques.

### Benefits

At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models.

Using the benchmark evaluation metrics described in this document, these models have been shown to provide superior performance to other, comparably-sized open model alternatives.
microsoft/git-base
microsoft
"2023-04-24T09:52:15Z"
407,946
42
transformers
[ "transformers", "pytorch", "safetensors", "git", "text-generation", "vision", "image-to-text", "image-captioning", "en", "arxiv:2205.14100", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-to-text
"2022-12-06T09:22:35Z"
---
language: en
license: mit
tags:
- vision
- image-to-text
- image-captioning
model_name: microsoft/git-base
pipeline_tag: image-to-text
---

# GIT (GenerativeImage2Text), base-sized

GIT (short for GenerativeImage2Text) model, base-sized version. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text).

Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on many (image, text) pairs.

The goal for the model is simply to predict the next text token, given the image tokens and previous text tokens.

The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token.

![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg)

This allows the model to be used for tasks like:

- image and video captioning
- visual question answering (VQA) on images and videos
- even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text).

## Intended uses & limitations

You can use the raw model for image captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you.

### How to use

For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/model_doc/git#transformers.GitForCausalLM.forward.example).

## Training data

From the paper:

> We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a).

Note, however, that this describes the model referred to as "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-base", a smaller variant of GIT trained on 10 million image-text pairs.

See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details.

### Preprocessing

We refer to the original repo regarding details for preprocessing during training.

During validation, the shorter edge of each image is resized, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation.

## Evaluation results

For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
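As a quick-start complement to the documentation linked under "How to use" above, here is a minimal captioning sketch using the standard `transformers` GIT integration; the COCO image URL is only an illustration, and any RGB image should work:

```python
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-base")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-base")

# Load an example image (illustrative URL; substitute your own image)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Encode the image into CLIP image tokens and generate a caption autoregressively
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```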
facebook/mbart-large-50-many-to-many-mmt
facebook
"2023-09-28T16:42:59Z"
407,787
207
transformers
[ "transformers", "pytorch", "tf", "jax", "rust", "safetensors", "mbart", "text2text-generation", "mbart-50", "translation", "multilingual", "ar", "cs", "de", "en", "es", "et", "fi", "fr", "gu", "hi", "it", "ja", "kk", "ko", "lt", "lv", "my", "ne", "nl", "ro", "ru", "si", "tr", "vi", "zh", "af", "az", "bn", "fa", "he", "hr", "id", "ka", "km", "mk", "ml", "mn", "mr", "pl", "ps", "pt", "sv", "sw", "ta", "te", "th", "tl", "uk", "ur", "xh", "gl", "sl", "arxiv:2008.00401", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
translation
"2022-03-02T23:29:05Z"
--- language: - multilingual - ar - cs - de - en - es - et - fi - fr - gu - hi - it - ja - kk - ko - lt - lv - my - ne - nl - ro - ru - si - tr - vi - zh - af - az - bn - fa - he - hr - id - ka - km - mk - ml - mn - mr - pl - ps - pt - sv - sw - ta - te - th - tl - uk - ur - xh - gl - sl tags: - mbart-50 pipeline_tag: translation --- # mBART-50 many to many multilingual machine translation This model is a fine-tuned checkpoint of [mBART-large-50](https://huggingface.co/facebook/mbart-large-50). `mbart-large-50-many-to-many-mmt` is fine-tuned for multilingual machine translation. It was introduced in [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) paper. The model can translate directly between any pair of 50 languages. To translate into a target language, the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the `forced_bos_token_id` parameter to the `generate` method. ```python from transformers import MBartForConditionalGeneration, MBart50TokenizerFast article_hi = "संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है" article_ar = "الأمين العام للأمم المتحدة يقول إنه لا يوجد حل عسكري في سوريا." model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") # translate Hindi to French tokenizer.src_lang = "hi_IN" encoded_hi = tokenizer(article_hi, return_tensors="pt") generated_tokens = model.generate( **encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"] ) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) # => "Le chef de l 'ONU affirme qu 'il n 'y a pas de solution militaire dans la Syrie." # translate Arabic to English tokenizer.src_lang = "ar_AR" encoded_ar = tokenizer(article_ar, return_tensors="pt") generated_tokens = model.generate( **encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"] ) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) # => "The Secretary-General of the United Nations says there is no military solution in Syria." ``` See the [model hub](https://huggingface.co/models?filter=mbart-50) to look for more fine-tuned versions. 
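The tokenizer exposes the full set of supported language codes (listed under "Languages covered" below) through its `lang_code_to_id` mapping, which is also where the `forced_bos_token_id` values above come from. A minimal sketch to enumerate them:

```python
from transformers import MBart50TokenizerFast

tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")

# Print every language code the model supports, e.g. "ar_AR", "en_XX", "fr_XX", ...
for code in sorted(tokenizer.lang_code_to_id):
    print(code)
```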
## Languages covered Arabic (ar_AR), Czech (cs_CZ), German (de_DE), English (en_XX), Spanish (es_XX), Estonian (et_EE), Finnish (fi_FI), French (fr_XX), Gujarati (gu_IN), Hindi (hi_IN), Italian (it_IT), Japanese (ja_XX), Kazakh (kk_KZ), Korean (ko_KR), Lithuanian (lt_LT), Latvian (lv_LV), Burmese (my_MM), Nepali (ne_NP), Dutch (nl_XX), Romanian (ro_RO), Russian (ru_RU), Sinhala (si_LK), Turkish (tr_TR), Vietnamese (vi_VN), Chinese (zh_CN), Afrikaans (af_ZA), Azerbaijani (az_AZ), Bengali (bn_IN), Persian (fa_IR), Hebrew (he_IL), Croatian (hr_HR), Indonesian (id_ID), Georgian (ka_GE), Khmer (km_KH), Macedonian (mk_MK), Malayalam (ml_IN), Mongolian (mn_MN), Marathi (mr_IN), Polish (pl_PL), Pashto (ps_AF), Portuguese (pt_XX), Swedish (sv_SE), Swahili (sw_KE), Tamil (ta_IN), Telugu (te_IN), Thai (th_TH), Tagalog (tl_XX), Ukrainian (uk_UA), Urdu (ur_PK), Xhosa (xh_ZA), Galician (gl_ES), Slovene (sl_SI) ## BibTeX entry and citation info ``` @article{tang2020multilingual, title={Multilingual Translation with Extensible Multilingual Pretraining and Finetuning}, author={Yuqing Tang and Chau Tran and Xian Li and Peng-Jen Chen and Naman Goyal and Vishrav Chaudhary and Jiatao Gu and Angela Fan}, year={2020}, eprint={2008.00401}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
sentence-transformers/paraphrase-TinyBERT-L6-v2
sentence-transformers
"2024-03-27T12:12:17Z"
407,611
2
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "safetensors", "bert", "feature-extraction", "sentence-similarity", "transformers", "arxiv:1908.10084", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
---
license: apache-2.0
library_name: sentence-transformers
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
pipeline_tag: sentence-similarity
---

# sentence-transformers/paraphrase-TinyBERT-L6-v2

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('sentence-transformers/paraphrase-TinyBERT-L6-v2')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/paraphrase-TinyBERT-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/paraphrase-TinyBERT-L6-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/paraphrase-TinyBERT-L6-v2)

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

This model was trained by [sentence-transformers](https://www.sbert.net/).
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
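## Semantic Search Example

Since the embeddings are intended for tasks like clustering or semantic search (see the description above), here is a small, hedged sketch of cosine-similarity scoring with the `sentence_transformers` utilities; the query and corpus sentences are made up purely for illustration:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/paraphrase-TinyBERT-L6-v2')

# Toy corpus and query, purely for illustration
corpus = ["A man is eating food.", "A woman is playing violin.", "The new movie is awesome."]
query = "Someone is having a meal."

corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Cosine similarity between the query and every corpus sentence
scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
for sentence, score in zip(corpus, scores):
    print(f"{score.item():.3f}\t{sentence}")
```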
google/flan-t5-large
google
"2023-07-17T12:49:05Z"
405,996
448
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "t5", "text2text-generation", "en", "fr", "ro", "de", "multilingual", "dataset:svakulenk0/qrecc", "dataset:taskmaster2", "dataset:djaym7/wiki_dialog", "dataset:deepmind/code_contests", "dataset:lambada", "dataset:gsm8k", "dataset:aqua_rat", "dataset:esnli", "dataset:quasc", "dataset:qed", "arxiv:2210.11416", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-10-21T10:07:08Z"
--- language: - en - fr - ro - de - multilingual widget: - text: "Translate to German: My name is Arthur" example_title: "Translation" - text: "Please answer to the following question. Who is going to be the next Ballon d'or?" example_title: "Question Answering" - text: "Q: Can Geoffrey Hinton have a conversation with George Washington? Give the rationale before answering." example_title: "Logical reasoning" - text: "Please answer the following question. What is the boiling point of Nitrogen?" example_title: "Scientific knowledge" - text: "Answer the following yes/no question. Can you write a whole Haiku in a single tweet?" example_title: "Yes/no question" - text: "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?" example_title: "Reasoning task" - text: "Q: ( False or not False or False ) is? A: Let's think step by step" example_title: "Boolean Expressions" - text: "The square root of x is the cube root of y. What is y to the power of 2, if x = 4?" example_title: "Math reasoning" - text: "Premise: At my age you will probably have learnt one lesson. Hypothesis: It's not certain how many lessons you'll learn by your thirties. Does the premise entail the hypothesis?" example_title: "Premise and hypothesis" tags: - text2text-generation datasets: - svakulenk0/qrecc - taskmaster2 - djaym7/wiki_dialog - deepmind/code_contests - lambada - gsm8k - aqua_rat - esnli - quasc - qed license: apache-2.0 --- # Model Card for FLAN-T5 large <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/flan2_architecture.jpg" alt="drawing" width="600"/> # Table of Contents 0. [TL;DR](#TL;DR) 1. [Model Details](#model-details) 2. [Usage](#usage) 3. [Uses](#uses) 4. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 5. [Training Details](#training-details) 6. [Evaluation](#evaluation) 7. [Environmental Impact](#environmental-impact) 8. [Citation](#citation) 9. [Model Card Authors](#model-card-authors) # TL;DR If you already know T5, FLAN-T5 is just better at everything. For the same number of parameters, these models have been fine-tuned on more than 1000 additional tasks covering also more languages. As mentioned in the first few lines of the abstract : > Flan-PaLM 540B achieves state-of-the-art performance on several benchmarks, such as 75.2% on five-shot MMLU. We also publicly release Flan-T5 checkpoints,1 which achieve strong few-shot performance even compared to much larger models, such as PaLM 62B. Overall, instruction finetuning is a general method for improving the performance and usability of pretrained language models. **Disclaimer**: Content from **this** model card has been written by the Hugging Face team, and parts of it were copy pasted from the [T5 model card](https://huggingface.co/t5-large). 
# Model Details ## Model Description - **Model type:** Language model - **Language(s) (NLP):** English, Spanish, Japanese, Persian, Hindi, French, Chinese, Bengali, Gujarati, German, Telugu, Italian, Arabic, Polish, Tamil, Marathi, Malayalam, Oriya, Panjabi, Portuguese, Urdu, Galician, Hebrew, Korean, Catalan, Thai, Dutch, Indonesian, Vietnamese, Bulgarian, Filipino, Central Khmer, Lao, Turkish, Russian, Croatian, Swedish, Yoruba, Kurdish, Burmese, Malay, Czech, Finnish, Somali, Tagalog, Swahili, Sinhala, Kannada, Zhuang, Igbo, Xhosa, Romanian, Haitian, Estonian, Slovak, Lithuanian, Greek, Nepali, Assamese, Norwegian - **License:** Apache 2.0 - **Related Models:** [All FLAN-T5 Checkpoints](https://huggingface.co/models?search=flan-t5) - **Original Checkpoints:** [All Original FLAN-T5 Checkpoints](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) - **Resources for more information:** - [Research paper](https://arxiv.org/pdf/2210.11416.pdf) - [GitHub Repo](https://github.com/google-research/t5x) - [Hugging Face FLAN-T5 Docs (Similar to T5) ](https://huggingface.co/docs/transformers/model_doc/t5) # Usage Find below some example scripts on how to use the model in `transformers`: ## Using the Pytorch model ### Running the model on a CPU <details> <summary> Click to expand </summary> ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large") model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large") input_text = "translate English to German: How old are you?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0])) ``` </details> ### Running the model on a GPU <details> <summary> Click to expand </summary> ```python # pip install accelerate from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large") model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto") input_text = "translate English to German: How old are you?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda") outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0])) ``` </details> ### Running the model on a GPU using different precisions #### FP16 <details> <summary> Click to expand </summary> ```python # pip install accelerate import torch from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large") model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16) input_text = "translate English to German: How old are you?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda") outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0])) ``` </details> #### INT8 <details> <summary> Click to expand </summary> ```python # pip install bitsandbytes accelerate from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large") model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", load_in_8bit=True) input_text = "translate English to German: How old are you?" 
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda") outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0])) ``` </details> # Uses ## Direct Use and Downstream Use The authors write in [the original paper's model card](https://arxiv.org/pdf/2210.11416.pdf) that: > The primary use is research on language models, including: research on zero-shot NLP tasks and in-context few-shot learning NLP tasks, such as reasoning, and question answering; advancing fairness and safety research, and understanding limitations of current large language models See the [research paper](https://arxiv.org/pdf/2210.11416.pdf) for further details. ## Out-of-Scope Use More information needed. # Bias, Risks, and Limitations The information below in this section are copied from the model's [official model card](https://arxiv.org/pdf/2210.11416.pdf): > Language models, including Flan-T5, can potentially be used for language generation in a harmful way, according to Rae et al. (2021). Flan-T5 should not be used directly in any application, without a prior assessment of safety and fairness concerns specific to the application. ## Ethical considerations and risks > Flan-T5 is fine-tuned on a large corpus of text data that was not filtered for explicit content or assessed for existing biases. As a result the model itself is potentially vulnerable to generating equivalently inappropriate content or replicating inherent biases in the underlying data. ## Known Limitations > Flan-T5 has not been tested in real world applications. ## Sensitive Use: > Flan-T5 should not be applied for any unacceptable use cases, e.g., generation of abusive speech. # Training Details ## Training Data The model was trained on a mixture of tasks, that includes the tasks described in the table below (from the original paper, figure 2): ![table.png](https://s3.amazonaws.com/moonup/production/uploads/1666363265279-62441d1d9fdefb55a0b7d12c.png) ## Training Procedure According to the model card from the [original paper](https://arxiv.org/pdf/2210.11416.pdf): > These models are based on pretrained T5 (Raffel et al., 2020) and fine-tuned with instructions for better zero-shot and few-shot performance. There is one fine-tuned Flan model per T5 model size. The model has been trained on TPU v3 or TPU v4 pods, using [`t5x`](https://github.com/google-research/t5x) codebase together with [`jax`](https://github.com/google/jax). # Evaluation ## Testing Data, Factors & Metrics The authors evaluated the model on various tasks covering several languages (1836 in total). See the table below for some quantitative evaluation: ![image.png](https://s3.amazonaws.com/moonup/production/uploads/1668072995230-62441d1d9fdefb55a0b7d12c.png) For full details, please check the [research paper](https://arxiv.org/pdf/2210.11416.pdf). ## Results For full results for FLAN-T5-Large, see the [research paper](https://arxiv.org/pdf/2210.11416.pdf), Table 3. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Google Cloud TPU Pods - TPU v3 or TPU v4 | Number of chips ≥ 4. 
- **Hours used:** More information needed - **Cloud Provider:** GCP - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Citation **BibTeX:** ```bibtex @misc{https://doi.org/10.48550/arxiv.2210.11416, doi = {10.48550/ARXIV.2210.11416}, url = {https://arxiv.org/abs/2210.11416}, author = {Chung, Hyung Won and Hou, Le and Longpre, Shayne and Zoph, Barret and Tay, Yi and Fedus, William and Li, Eric and Wang, Xuezhi and Dehghani, Mostafa and Brahma, Siddhartha and Webson, Albert and Gu, Shixiang Shane and Dai, Zhuyun and Suzgun, Mirac and Chen, Xinyun and Chowdhery, Aakanksha and Narang, Sharan and Mishra, Gaurav and Yu, Adams and Zhao, Vincent and Huang, Yanping and Dai, Andrew and Yu, Hongkun and Petrov, Slav and Chi, Ed H. and Dean, Jeff and Devlin, Jacob and Roberts, Adam and Zhou, Denny and Le, Quoc V. and Wei, Jason}, keywords = {Machine Learning (cs.LG), Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Scaling Instruction-Finetuned Language Models}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
bigscience/bloomz-560m
bigscience
"2023-05-27T17:27:11Z"
403,209
94
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "bloom", "text-generation", "ak", "ar", "as", "bm", "bn", "ca", "code", "en", "es", "eu", "fon", "fr", "gu", "hi", "id", "ig", "ki", "kn", "lg", "ln", "ml", "mr", "ne", "nso", "ny", "or", "pa", "pt", "rn", "rw", "sn", "st", "sw", "ta", "te", "tn", "ts", "tum", "tw", "ur", "vi", "wo", "xh", "yo", "zh", "zu", "dataset:bigscience/xP3", "arxiv:2211.01786", "license:bigscience-bloom-rail-1.0", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2022-10-08T16:14:42Z"
--- datasets: - bigscience/xP3 license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zu programming_language: - C - C++ - C# - Go - Java - JavaScript - Lua - PHP - Python - Ruby - Rust - Scala - TypeScript pipeline_tag: text-generation widget: - text: "一个传奇的开端,一个不灭的神话,这不仅仅是一部电影,而是作为一个走进新时代的标签,永远彪炳史册。Would you rate the previous review as positive, neutral or negative?" example_title: "zh-en sentiment" - text: "一个传奇的开端,一个不灭的神话,这不仅仅是一部电影,而是作为一个走进新时代的标签,永远彪炳史册。你认为这句话的立场是赞扬、中立还是批评?" example_title: "zh-zh sentiment" - text: "Suggest at least five related search terms to \"Mạng neural nhân tạo\"." example_title: "vi-en query" - text: "Proposez au moins cinq mots clés concernant «Réseau de neurones artificiels»." example_title: "fr-fr query" - text: "Explain in a sentence in Telugu what is backpropagation in neural networks." example_title: "te-en qa" - text: "Why is the sky blue?" example_title: "en-en qa" - text: "Write a fairy tale about a troll saving a princess from a dangerous dragon. The fairy tale is a masterpiece that has achieved praise worldwide and its moral is \"Heroes Come in All Shapes and Sizes\". Story (in Spanish):" example_title: "es-en fable" - text: "Write a fable about wood elves living in a forest that is suddenly invaded by ogres. The fable is a masterpiece that has achieved praise worldwide and its moral is \"Violence is the last refuge of the incompetent\". Fable (in Hindi):" example_title: "hi-en fable" model-index: - name: bloomz-560m results: - task: type: Coreference resolution dataset: type: winogrande name: Winogrande XL (xl) config: xl split: validation revision: a80f460359d1e9a67c006011c94de42a8759430c metrics: - type: Accuracy value: 52.41 - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (en) config: en split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 51.01 - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (fr) config: fr split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 51.81 - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (jp) config: jp split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 52.03 - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (pt) config: pt split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 53.99 - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (ru) config: ru split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 53.97 - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (zh) config: zh split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 54.76 - task: type: Natural language inference dataset: type: anli name: ANLI (r1) config: r1 split: validation revision: 9dbd830a06fea8b1c49d6e5ef2004a08d9f45094 metrics: - type: Accuracy value: 33.4 - task: type: Natural language inference dataset: type: anli name: ANLI (r2) config: r2 split: validation revision: 9dbd830a06fea8b1c49d6e5ef2004a08d9f45094 metrics: - type: Accuracy value: 
33.4 - task: type: Natural language inference dataset: type: anli name: ANLI (r3) config: r3 split: validation revision: 9dbd830a06fea8b1c49d6e5ef2004a08d9f45094 metrics: - type: Accuracy value: 33.5 - task: type: Natural language inference dataset: type: super_glue name: SuperGLUE (cb) config: cb split: validation revision: 9e12063561e7e6c79099feb6d5a493142584e9e2 metrics: - type: Accuracy value: 53.57 - task: type: Natural language inference dataset: type: super_glue name: SuperGLUE (rte) config: rte split: validation revision: 9e12063561e7e6c79099feb6d5a493142584e9e2 metrics: - type: Accuracy value: 67.15 - task: type: Natural language inference dataset: type: xnli name: XNLI (ar) config: ar split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 44.46 - task: type: Natural language inference dataset: type: xnli name: XNLI (bg) config: bg split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 39.76 - task: type: Natural language inference dataset: type: xnli name: XNLI (de) config: de split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 39.36 - task: type: Natural language inference dataset: type: xnli name: XNLI (el) config: el split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 40.96 - task: type: Natural language inference dataset: type: xnli name: XNLI (en) config: en split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 46.43 - task: type: Natural language inference dataset: type: xnli name: XNLI (es) config: es split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 44.98 - task: type: Natural language inference dataset: type: xnli name: XNLI (fr) config: fr split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 45.54 - task: type: Natural language inference dataset: type: xnli name: XNLI (hi) config: hi split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 41.81 - task: type: Natural language inference dataset: type: xnli name: XNLI (ru) config: ru split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 39.64 - task: type: Natural language inference dataset: type: xnli name: XNLI (sw) config: sw split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 38.35 - task: type: Natural language inference dataset: type: xnli name: XNLI (th) config: th split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 35.5 - task: type: Natural language inference dataset: type: xnli name: XNLI (tr) config: tr split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 37.31 - task: type: Natural language inference dataset: type: xnli name: XNLI (ur) config: ur split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 38.96 - task: type: Natural language inference dataset: type: xnli name: XNLI (vi) config: vi split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 44.74 - task: type: Natural language inference dataset: type: xnli name: XNLI (zh) config: zh split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 44.66 
- task: type: Program synthesis dataset: type: openai_humaneval name: HumanEval config: None split: test revision: e8dc562f5de170c54b5481011dd9f4fa04845771 metrics: - type: Pass@1 value: 2.18 - type: Pass@10 value: 4.11 - type: Pass@100 value: 9.00 - task: type: Sentence completion dataset: type: story_cloze name: StoryCloze (2016) config: "2016" split: validation revision: e724c6f8cdf7c7a2fb229d862226e15b023ee4db metrics: - type: Accuracy value: 60.29 - task: type: Sentence completion dataset: type: super_glue name: SuperGLUE (copa) config: copa split: validation revision: 9e12063561e7e6c79099feb6d5a493142584e9e2 metrics: - type: Accuracy value: 52.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (et) config: et split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 53.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (ht) config: ht split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 49.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (id) config: id split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 57.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (it) config: it split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 52.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (qu) config: qu split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 55.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (sw) config: sw split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 56.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (ta) config: ta split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 58.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (th) config: th split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 58.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (tr) config: tr split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 61.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (vi) config: vi split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 61.0 - task: type: Sentence completion dataset: type: xcopa name: XCOPA (zh) config: zh split: validation revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187 metrics: - type: Accuracy value: 61.0 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (ar) config: ar split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 54.4 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (es) config: es split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 56.45 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (eu) config: eu split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 50.56 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (hi) config: hi split: validation revision: 
8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 55.79 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (id) config: id split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 57.84 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (my) config: my split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 47.05 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (ru) config: ru split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 53.14 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (sw) config: sw split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 51.36 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (te) config: te split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 54.86 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (zh) config: zh split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 56.52 --- ![xmtf](https://github.com/bigscience-workshop/xmtf/blob/master/xmtf_banner.png?raw=true) # Table of Contents 1. [Model Summary](#model-summary) 2. [Use](#use) 3. [Limitations](#limitations) 4. [Training](#training) 5. [Evaluation](#evaluation) 7. [Citation](#citation) # Model Summary > We present BLOOMZ & mT0, a family of models capable of following human instructions in dozens of languages zero-shot. We finetune BLOOM & mT5 pretrained multilingual language models on our crosslingual task mixture (xP3) and find the resulting models capable of crosslingual generalization to unseen tasks & languages. - **Repository:** [bigscience-workshop/xmtf](https://github.com/bigscience-workshop/xmtf) - **Paper:** [Crosslingual Generalization through Multitask Finetuning](https://arxiv.org/abs/2211.01786) - **Point of Contact:** [Niklas Muennighoff](mailto:niklas@hf.co) - **Languages:** Refer to [bloom](https://huggingface.co/bigscience/bloom) for pretraining & [xP3](https://huggingface.co/datasets/bigscience/xP3) for finetuning language proportions. It understands both pretraining & finetuning languages. - **BLOOMZ & mT0 Model Family:** <div class="max-w-full overflow-auto"> <table> <tr> <th colspan="12">Multitask finetuned on <a style="font-weight:bold" href=https://huggingface.co/datasets/bigscience/xP3>xP3</a>. Recommended for prompting in English. 
</tr> <tr> <td>Parameters</td> <td>300M</td> <td>580M</td> <td>1.2B</td> <td>3.7B</td> <td>13B</td> <td>560M</td> <td>1.1B</td> <td>1.7B</td> <td>3B</td> <td>7.1B</td> <td>176B</td> </tr> <tr> <td>Finetuned Model</td> <td><a href=https://huggingface.co/bigscience/mt0-small>mt0-small</a></td> <td><a href=https://huggingface.co/bigscience/mt0-base>mt0-base</a></td> <td><a href=https://huggingface.co/bigscience/mt0-large>mt0-large</a></td> <td><a href=https://huggingface.co/bigscience/mt0-xl>mt0-xl</a></td> <td><a href=https://huggingface.co/bigscience/mt0-xxl>mt0-xxl</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-560m>bloomz-560m</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-1b1>bloomz-1b1</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-1b7>bloomz-1b7</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-3b>bloomz-3b</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-7b1>bloomz-7b1</a></td> <td><a href=https://huggingface.co/bigscience/bloomz>bloomz</a></td> </tr> </tr> <tr> <th colspan="12">Multitask finetuned on <a style="font-weight:bold" href=https://huggingface.co/datasets/bigscience/xP3mt>xP3mt</a>. Recommended for prompting in non-English.</th> </tr> <tr> <td>Finetuned Model</td> <td></td> <td></td> <td></td> <td></td> <td><a href=https://huggingface.co/bigscience/mt0-xxl-mt>mt0-xxl-mt</a></td> <td></td> <td></td> <td></td> <td></td> <td><a href=https://huggingface.co/bigscience/bloomz-7b1-mt>bloomz-7b1-mt</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-mt>bloomz-mt</a></td> </tr> <th colspan="12">Multitask finetuned on <a style="font-weight:bold" href=https://huggingface.co/datasets/Muennighoff/P3>P3</a>. Released for research purposes only. Strictly inferior to above models!</th> </tr> <tr> <td>Finetuned Model</td> <td></td> <td></td> <td></td> <td></td> <td><a href=https://huggingface.co/bigscience/mt0-xxl-p3>mt0-xxl-p3</a></td> <td></td> <td></td> <td></td> <td></td> <td><a href=https://huggingface.co/bigscience/bloomz-7b1-p3>bloomz-7b1-p3</a></td> <td><a href=https://huggingface.co/bigscience/bloomz-p3>bloomz-p3</a></td> </tr> <th colspan="12">Original pretrained checkpoints. Not recommended.</th> <tr> <td>Pretrained Model</td> <td><a href=https://huggingface.co/google/mt5-small>mt5-small</a></td> <td><a href=https://huggingface.co/google/mt5-base>mt5-base</a></td> <td><a href=https://huggingface.co/google/mt5-large>mt5-large</a></td> <td><a href=https://huggingface.co/google/mt5-xl>mt5-xl</a></td> <td><a href=https://huggingface.co/google/mt5-xxl>mt5-xxl</a></td> <td><a href=https://huggingface.co/bigscience/bloom-560m>bloom-560m</a></td> <td><a href=https://huggingface.co/bigscience/bloom-1b1>bloom-1b1</a></td> <td><a href=https://huggingface.co/bigscience/bloom-1b7>bloom-1b7</a></td> <td><a href=https://huggingface.co/bigscience/bloom-3b>bloom-3b</a></td> <td><a href=https://huggingface.co/bigscience/bloom-7b1>bloom-7b1</a></td> <td><a href=https://huggingface.co/bigscience/bloom>bloom</a></td> </tr> </table> </div> # Use ## Intended use We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "*Translate to English: Je t’aime.*", the model will most likely answer "*I love you.*". Some prompt ideas from our paper: - 一个传奇的开端,一个不灭的神话,这不仅仅是一部电影,而是作为一个走进新时代的标签,永远彪炳史册。你认为这句话的立场是赞扬、中立还是批评? - Suggest at least five related search terms to "Mạng neural nhân tạo". - Write a fairy tale about a troll saving a princess from a dangerous dragon. 
The fairy tale is a masterpiece that has achieved praise worldwide and its moral is "Heroes Come in All Shapes and Sizes". Story (in Spanish): - Explain in a sentence in Telugu what is backpropagation in neural networks. **Feel free to share your generations in the Community tab!** ## How to use ### CPU <details> <summary> Click to expand </summary> ```python # pip install -q transformers from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "bigscience/bloomz-560m" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) inputs = tokenizer.encode("Translate to English: Je t’aime.", return_tensors="pt") outputs = model.generate(inputs) print(tokenizer.decode(outputs[0])) ``` </details> ### GPU <details> <summary> Click to expand </summary> ```python # pip install -q transformers accelerate from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "bigscience/bloomz-560m" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", device_map="auto") inputs = tokenizer.encode("Translate to English: Je t’aime.", return_tensors="pt").to("cuda") outputs = model.generate(inputs) print(tokenizer.decode(outputs[0])) ``` </details> ### GPU in 8bit <details> <summary> Click to expand </summary> ```python # pip install -q transformers accelerate bitsandbytes from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "bigscience/bloomz-560m" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", load_in_8bit=True) inputs = tokenizer.encode("Translate to English: Je t’aime.", return_tensors="pt").to("cuda") outputs = model.generate(inputs) print(tokenizer.decode(outputs[0])) ``` </details> <!-- Necessary for whitespace --> ### # Limitations **Prompt Engineering:** The performance may vary depending on the prompt. For BLOOMZ models, we recommend making it very clear when the input stops to avoid the model trying to continue it. For example, the prompt "*Translate to English: Je t'aime*" without the full stop (.) at the end, may result in the model trying to continue the French sentence. Better prompts are e.g. "*Translate to English: Je t'aime.*", "*Translate to English: Je t'aime. Translation:*" "*What is "Je t'aime." in English?*", where it is clear for the model when it should answer. Further, we recommend providing the model as much context as possible. For example, if you want it to answer in Telugu, then tell the model, e.g. "*Explain in a sentence in Telugu what is backpropagation in neural networks.*". 
# Training ## Model - **Architecture:** Same as [bloom-560m](https://huggingface.co/bigscience/bloom-560m), also refer to the `config.json` file - **Finetuning steps:** 1750 - **Finetuning tokens:** 3.67 billion - **Finetuning layout:** 1x pipeline parallel, 1x tensor parallel, 1x data parallel - **Precision:** float16 ## Hardware - **CPUs:** AMD CPUs with 512GB memory per node - **GPUs:** 64 A100 80GB GPUs with 8 GPUs per node (8 nodes) using NVLink 4 inter-gpu connects, 4 OmniPath links - **Communication:** NCCL-communications network with a fully dedicated subnet ## Software - **Orchestration:** [Megatron-DeepSpeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed) - **Optimizer & parallelism:** [DeepSpeed](https://github.com/microsoft/DeepSpeed) - **Neural networks:** [PyTorch](https://github.com/pytorch/pytorch) (pytorch-1.11 w/ CUDA-11.5) - **FP16 if applicable:** [apex](https://github.com/NVIDIA/apex) # Evaluation We refer to Table 7 from our [paper](https://arxiv.org/abs/2211.01786) & [bigscience/evaluation-results](https://huggingface.co/datasets/bigscience/evaluation-results) for zero-shot results on unseen tasks. The sidebar reports zero-shot performance of the best prompt per dataset config. # Citation ```bibtex @article{muennighoff2022crosslingual, title={Crosslingual generalization through multitask finetuning}, author={Muennighoff, Niklas and Wang, Thomas and Sutawika, Lintang and Roberts, Adam and Biderman, Stella and Scao, Teven Le and Bari, M Saiful and Shen, Sheng and Yong, Zheng-Xin and Schoelkopf, Hailey and others}, journal={arXiv preprint arXiv:2211.01786}, year={2022} } ```
coqui/XTTS-v2
coqui
"2023-12-11T17:50:00Z"
396,200
1,067
coqui
[ "coqui", "text-to-speech", "license:other", "has_space", "region:us" ]
text-to-speech
"2023-10-31T10:11:33Z"
--- license: other license_name: coqui-public-model-license license_link: https://coqui.ai/cpml library_name: coqui pipeline_tag: text-to-speech widget: - text: "Once when I was six years old I saw a magnificent picture" --- # ⓍTTS ⓍTTS is a Voice generation model that lets you clone voices into different languages by using just a quick 6-second audio clip. There is no need for an excessive amount of training data that spans countless hours. This is the same or similar model to what powers [Coqui Studio](https://coqui.ai/) and [Coqui API](https://docs.coqui.ai/docs). ### Features - Supports 17 languages. - Voice cloning with just a 6-second audio clip. - Emotion and style transfer by cloning. - Cross-language voice cloning. - Multi-lingual speech generation. - 24khz sampling rate. ### Updates over XTTS-v1 - 2 new languages; Hungarian and Korean - Architectural improvements for speaker conditioning. - Enables the use of multiple speaker references and interpolation between speakers. - Stability improvements. - Better prosody and audio quality across the board. ### Languages XTTS-v2 supports 17 languages: **English (en), Spanish (es), French (fr), German (de), Italian (it), Portuguese (pt), Polish (pl), Turkish (tr), Russian (ru), Dutch (nl), Czech (cs), Arabic (ar), Chinese (zh-cn), Japanese (ja), Hungarian (hu), Korean (ko) Hindi (hi)**. Stay tuned as we continue to add support for more languages. If you have any language requests, feel free to reach out! ### Code The [code-base](https://github.com/coqui-ai/TTS) supports inference and [fine-tuning](https://tts.readthedocs.io/en/latest/models/xtts.html#training). ### Demo Spaces - [XTTS Space](https://huggingface.co/spaces/coqui/xtts) : You can see how model performs on supported languages, and try with your own reference or microphone input - [XTTS Voice Chat with Mistral or Zephyr](https://huggingface.co/spaces/coqui/voice-chat-with-mistral) : You can experience streaming voice chat with Mistral 7B Instruct or Zephyr 7B Beta | | | | ------------------------------- | --------------------------------------- | | 🐸💬 **CoquiTTS** | [coqui/TTS on Github](https://github.com/coqui-ai/TTS)| | 💼 **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/) | 👩‍💻 **Questions** | [GitHub Discussions](https://github.com/coqui-ai/TTS/discussions) | | 🗯 **Community** | [Discord](https://discord.gg/5eXr5seRrv) | ### License This model is licensed under [Coqui Public Model License](https://coqui.ai/cpml). There's a lot that goes into a license for generative models, and you can read more of [the origin story of CPML here](https://coqui.ai/blog/tts/cpml). ### Contact Come and join in our 🐸Community. We're active on [Discord](https://discord.gg/fBC58unbKE) and [Twitter](https://twitter.com/coqui_ai). You can also mail us at info@coqui.ai. Using 🐸TTS API: ```python from TTS.api import TTS tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True) # generate speech by cloning a voice using default settings tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", file_path="output.wav", speaker_wav="/path/to/target/speaker.wav", language="en") ``` Using 🐸TTS Command line: ```console tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 \ --text "Bugün okula gitmek istemiyorum." 
\ --speaker_wav /path/to/target/speaker.wav \ --language_idx tr \ --use_cuda true ``` Using the model directly: ```python from TTS.tts.configs.xtts_config import XttsConfig from TTS.tts.models.xtts import Xtts config = XttsConfig() config.load_json("/path/to/xtts/config.json") model = Xtts.init_from_config(config) model.load_checkpoint(config, checkpoint_dir="/path/to/xtts/", eval=True) model.cuda() outputs = model.synthesize( "It took me quite a long time to develop a voice and now that I have it I am not going to be silent.", config, speaker_wav="/data/TTS-public/_refclips/3.wav", gpt_cond_len=3, language="en", ) ```
databricks/dolly-v2-3b
databricks
"2023-06-30T18:33:24Z"
394,202
277
transformers
[ "transformers", "pytorch", "gpt_neox", "text-generation", "en", "dataset:databricks/databricks-dolly-15k", "license:mit", "autotrain_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-04-13T05:20:15Z"
---
license: mit
language:
- en
library_name: transformers
inference: false
datasets:
- databricks/databricks-dolly-15k
---

# dolly-v2-3b Model Card

## Summary

Databricks' `dolly-v2-3b` is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. Based on `pythia-2.8b`, Dolly is trained on ~15k instruction/response fine-tuning records [`databricks-dolly-15k`](https://github.com/databrickslabs/dolly/tree/master/data) generated by Databricks employees in capability domains from the InstructGPT paper, including brainstorming, classification, closed QA, generation, information extraction, open QA and summarization. `dolly-v2-3b` is not a state-of-the-art model, but does exhibit surprisingly high quality instruction following behavior not characteristic of the foundation model on which it is based.

Dolly v2 is also available in these larger model sizes:

* [dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b), a 12 billion parameter model based on `pythia-12b`
* [dolly-v2-7b](https://huggingface.co/databricks/dolly-v2-7b), a 6.9 billion parameter model based on `pythia-6.9b`

Please refer to the [dolly GitHub repo](https://github.com/databrickslabs/dolly#getting-started-with-response-generation) for tips on running inference for various GPU configurations.

**Owner**: Databricks, Inc.

## Model Overview

`dolly-v2-3b` is a 2.8 billion parameter causal language model created by [Databricks](https://databricks.com/) that is derived from [EleutherAI's](https://www.eleuther.ai/) [Pythia-2.8b](https://huggingface.co/EleutherAI/pythia-2.8b) and fine-tuned on a [~15K record instruction corpus](https://github.com/databrickslabs/dolly/tree/master/data) generated by Databricks employees and released under a permissive license (CC-BY-SA).

## Usage

To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers` and `accelerate` libraries installed. In a Databricks notebook you could run:

```python
%pip install "accelerate>=0.16.0,<1" "transformers[torch]>=4.28.1,<5" "torch>=1.13.1,<2"
```

The instruction following pipeline can be loaded using the `pipeline` function as shown below. This loads a custom `InstructionTextGenerationPipeline` found in the model repo [here](https://huggingface.co/databricks/dolly-v2-3b/blob/main/instruct_pipeline.py), which is why `trust_remote_code=True` is required. Including `torch_dtype=torch.bfloat16` is generally recommended if this type is supported, in order to reduce memory usage. It does not appear to impact output quality. It is also fine to remove it if there is sufficient memory.
```python import torch from transformers import pipeline generate_text = pipeline(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto") ``` You can then use the pipeline to answer instructions: ```python res = generate_text("Explain to me the difference between nuclear fission and fusion.") print(res[0]["generated_text"]) ``` Alternatively, if you prefer to not use `trust_remote_code=True` you can download [instruct_pipeline.py](https://huggingface.co/databricks/dolly-v2-3b/blob/main/instruct_pipeline.py), store it alongside your notebook, and construct the pipeline yourself from the loaded model and tokenizer: ```python import torch from instruct_pipeline import InstructionTextGenerationPipeline from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b", padding_side="left") model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b", device_map="auto", torch_dtype=torch.bfloat16) generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer) ``` ### LangChain Usage To use the pipeline with LangChain, you must set `return_full_text=True`, as LangChain expects the full text to be returned and the default for the pipeline is to only return the new text. ```python import torch from transformers import pipeline generate_text = pipeline(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", return_full_text=True) ``` You can create a prompt that either has only an instruction or has an instruction with context: ```python from langchain import PromptTemplate, LLMChain from langchain.llms import HuggingFacePipeline # template for an instrution with no input prompt = PromptTemplate( input_variables=["instruction"], template="{instruction}") # template for an instruction with input prompt_with_context = PromptTemplate( input_variables=["instruction", "context"], template="{instruction}\n\nInput:\n{context}") hf_pipeline = HuggingFacePipeline(pipeline=generate_text) llm_chain = LLMChain(llm=hf_pipeline, prompt=prompt) llm_context_chain = LLMChain(llm=hf_pipeline, prompt=prompt_with_context) ``` Example predicting using a simple instruction: ```python print(llm_chain.predict(instruction="Explain to me the difference between nuclear fission and fusion.").lstrip()) ``` Example predicting using an instruction with context: ```python context = """George Washington (February 22, 1732[b] - December 14, 1799) was an American military officer, statesman, and Founding Father who served as the first president of the United States from 1789 to 1797.""" print(llm_context_chain.predict(instruction="When was George Washington president?", context=context).lstrip()) ``` ## Known Limitations ### Performance Limitations **`dolly-v2-3b` is not a state-of-the-art generative language model** and, though quantitative benchmarking is ongoing, is not designed to perform competitively with more modern model architectures or models subject to larger pretraining corpuses. The Dolly model family is under active development, and so any list of shortcomings is unlikely to be exhaustive, but we include known limitations and misfires here as a means to document and share our preliminary findings with the community. 
In particular, `dolly-v2-3b` struggles with: syntactically complex prompts, programming problems, mathematical operations, factual errors, dates and times, open-ended question answering, hallucination, enumerating lists of specific length, stylistic mimicry, having a sense of humor, etc. Moreover, we find that `dolly-v2-3b` does not have some capabilities, such as well-formatted letter writing, present in the original model. ### Dataset Limitations Like all language models, `dolly-v2-3b` reflects the content and limitations of its training corpuses. - **The Pile**: GPT-J's pre-training corpus contains content mostly collected from the public internet, and like most web-scale datasets, it contains content many users would find objectionable. As such, the model is likely to reflect these shortcomings, potentially overtly in the case it is explicitly asked to produce objectionable content, and sometimes subtly, as in the case of biased or harmful implicit associations. - **`databricks-dolly-15k`**: The training data on which `dolly-v2-3b` is instruction tuned represents natural language instructions generated by Databricks employees during a period spanning March and April 2023 and includes passages from Wikipedia as references passages for instruction categories like closed QA and summarization. To our knowledge it does not contain obscenity, intellectual property or personally identifying information about non-public figures, but it may contain typos and factual errors. The dataset may also reflect biases found in Wikipedia. Finally, the dataset likely reflects the interests and semantic choices of Databricks employees, a demographic which is not representative of the global population at large. Databricks is committed to ongoing research and development efforts to develop helpful, honest and harmless AI technologies that maximize the potential of all individuals and organizations. ### Benchmark Metrics Below you'll find various models benchmark performance on the [EleutherAI LLM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness); model results are sorted by geometric mean to produce an intelligible ordering. As outlined above, these results demonstrate that `dolly-v2-3b` is not state of the art. It underperforms `dolly-v1-6b` in the evaluation benchmarks, which is not surprising considering it has half the number of parameters. 
| model | openbookqa | arc_easy | winogrande | hellaswag | arc_challenge | piqa | boolq | gmean | | --------------------------------- | ------------ | ---------- | ------------ | ----------- | --------------- | -------- | -------- | ---------| | EleutherAI/pythia-2.8b | 0.348 | 0.585859 | 0.589582 | 0.591217 | 0.323379 | 0.73395 | 0.638226 | 0.523431 | | EleutherAI/pythia-6.9b | 0.368 | 0.604798 | 0.608524 | 0.631548 | 0.343857 | 0.761153 | 0.6263 | 0.543567 | | databricks/dolly-v2-3b | 0.384 | 0.611532 | 0.589582 | 0.650767 | 0.370307 | 0.742655 | 0.575535 | 0.544886 | | EleutherAI/pythia-12b | 0.364 | 0.627104 | 0.636148 | 0.668094 | 0.346416 | 0.760065 | 0.673394 | 0.559676 | | EleutherAI/gpt-j-6B | 0.382 | 0.621633 | 0.651144 | 0.662617 | 0.363481 | 0.761153 | 0.655963 | 0.565936 | | databricks/dolly-v2-12b | 0.408 | 0.63931 | 0.616417 | 0.707927 | 0.388225 | 0.757889 | 0.568196 | 0.56781 | | databricks/dolly-v2-7b | 0.392 | 0.633838 | 0.607735 | 0.686517 | 0.406997 | 0.750816 | 0.644037 | 0.573487 | | databricks/dolly-v1-6b | 0.41 | 0.62963 | 0.643252 | 0.676758 | 0.384812 | 0.773667 | 0.687768 | 0.583431 | | EleutherAI/gpt-neox-20b | 0.402 | 0.683923 | 0.656669 | 0.7142 | 0.408703 | 0.784004 | 0.695413 | 0.602236 | # Citation ``` @online{DatabricksBlog2023DollyV2, author = {Mike Conover and Matt Hayes and Ankit Mathur and Jianwei Xie and Jun Wan and Sam Shah and Ali Ghodsi and Patrick Wendell and Matei Zaharia and Reynold Xin}, title = {Free Dolly: Introducing the World's First Truly Open Instruction-Tuned LLM}, year = {2023}, url = {https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm}, urldate = {2023-06-30} } ``` # Happy Hacking!
speechbrain/spkrec-ecapa-voxceleb
speechbrain
"2024-02-19T22:39:59Z"
391,316
125
speechbrain
[ "speechbrain", "embeddings", "Speaker", "Verification", "Identification", "pytorch", "ECAPA", "TDNN", "en", "dataset:voxceleb", "arxiv:2106.04624", "license:apache-2.0", "has_space", "region:us" ]
null
"2022-03-02T23:29:05Z"
---
language: "en"
thumbnail:
tags:
- speechbrain
- embeddings
- Speaker
- Verification
- Identification
- pytorch
- ECAPA
- TDNN
license: "apache-2.0"
datasets:
- voxceleb
metrics:
- EER
widget:
- example_title: VoxCeleb Speaker id10003
  src: https://cdn-media.huggingface.co/speech_samples/VoxCeleb1_00003.wav
- example_title: VoxCeleb Speaker id10004
  src: https://cdn-media.huggingface.co/speech_samples/VoxCeleb_00004.wav
---

<iframe src="https://ghbtns.com/github-btn.html?user=speechbrain&repo=speechbrain&type=star&count=true&size=large&v=2" frameborder="0" scrolling="0" width="170" height="30" title="GitHub"></iframe>
<br/><br/>

# Speaker Verification with ECAPA-TDNN embeddings on Voxceleb

This repository provides all the necessary tools to perform speaker verification with a pretrained ECAPA-TDNN model using SpeechBrain. The system can be used to extract speaker embeddings as well. It is trained on Voxceleb 1 + Voxceleb 2 training data.

For a better experience, we encourage you to learn more about [SpeechBrain](https://speechbrain.github.io). The model performance on the Voxceleb1-test set (Cleaned) is:

| Release | EER(%) |
|:-------------:|:--------------:|
| 05-03-21 | 0.80 |

## Pipeline description

This system is composed of an ECAPA-TDNN model. It is a combination of convolutional and residual blocks. The embeddings are extracted using attentive statistical pooling. The system is trained with Additive Margin Softmax Loss. Speaker Verification is performed using cosine distance between speaker embeddings.

## Install SpeechBrain

First of all, please install SpeechBrain with the following command:

```
pip install git+https://github.com/speechbrain/speechbrain.git@develop
```

Please notice that we encourage you to read our tutorials and learn more about [SpeechBrain](https://speechbrain.github.io).

### Compute your speaker embeddings

```python
import torchaudio
from speechbrain.inference.speaker import EncoderClassifier

classifier = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
signal, fs = torchaudio.load('tests/samples/ASR/spk1_snt1.wav')
embeddings = classifier.encode_batch(signal)
```

The system is trained with recordings sampled at 16kHz (single channel). The code will automatically normalize your audio (i.e., resampling + mono channel selection) when calling *classify_file* if needed. Make sure your input tensor is compliant with the expected sampling rate if you use *encode_batch* and *classify_batch*.

### Perform Speaker Verification

```python
from speechbrain.inference.speaker import SpeakerRecognition

verification = SpeakerRecognition.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", savedir="pretrained_models/spkrec-ecapa-voxceleb")
score, prediction = verification.verify_files("tests/samples/ASR/spk1_snt1.wav", "tests/samples/ASR/spk2_snt1.wav") # Different Speakers
score, prediction = verification.verify_files("tests/samples/ASR/spk1_snt1.wav", "tests/samples/ASR/spk1_snt2.wav") # Same Speaker
```

The prediction is 1 if the two signals in input are from the same speaker and 0 otherwise.

### Inference on GPU

To perform inference on the GPU, add `run_opts={"device":"cuda"}` when calling the `from_hparams` method.

### Training

The model was trained with SpeechBrain (aa018540). To train it from scratch, follow these steps:

1. Clone SpeechBrain:

```bash
git clone https://github.com/speechbrain/speechbrain/
```

2. Install it:

```
cd speechbrain
pip install -r requirements.txt
pip install -e .
```

3.
Run Training: ``` cd recipes/VoxCeleb/SpeakerRec python train_speaker_embeddings.py hparams/train_ecapa_tdnn.yaml --data_folder=your_data_folder ``` You can find our training results (models, logs, etc) [here](https://drive.google.com/drive/folders/1-ahC1xeyPinAHp2oAohL-02smNWO41Cc?usp=sharing). ### Limitations The SpeechBrain team does not provide any warranty on the performance achieved by this model when used on other datasets. #### Referencing ECAPA-TDNN ``` @inproceedings{DBLP:conf/interspeech/DesplanquesTD20, author = {Brecht Desplanques and Jenthe Thienpondt and Kris Demuynck}, editor = {Helen Meng and Bo Xu and Thomas Fang Zheng}, title = {{ECAPA-TDNN:} Emphasized Channel Attention, Propagation and Aggregation in {TDNN} Based Speaker Verification}, booktitle = {Interspeech 2020}, pages = {3830--3834}, publisher = {{ISCA}}, year = {2020}, } ``` # **Citing SpeechBrain** Please, cite SpeechBrain if you use it for your research or business. ```bibtex @misc{speechbrain, title={{SpeechBrain}: A General-Purpose Speech Toolkit}, author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio}, year={2021}, eprint={2106.04624}, archivePrefix={arXiv}, primaryClass={eess.AS}, note={arXiv:2106.04624} } ``` # **About SpeechBrain** - Website: https://speechbrain.github.io/ - Code: https://github.com/speechbrain/speechbrain/ - HuggingFace: https://huggingface.co/speechbrain/
openai-community/gpt2-xl
openai-community
"2024-02-19T12:39:12Z"
389,043
265
transformers
[ "transformers", "pytorch", "tf", "jax", "rust", "safetensors", "gpt2", "text-generation", "en", "arxiv:1910.09700", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2022-03-02T23:29:04Z"
--- language: en license: mit --- # GPT-2 XL ## Table of Contents - [Model Details](#model-details) - [How To Get Started With the Model](#how-to-get-started-with-the-model) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Environmental Impact](#environmental-impact) - [Technical Specifications](#technical-specifications) - [Citation Information](#citation-information) - [Model Card Authors](#model-card-authors) ## Model Details **Model Description:** GPT-2 XL is the **1.5B parameter** version of GPT-2, a transformer-based language model created and released by OpenAI. The model is a pretrained model on English language using a causal language modeling (CLM) objective. - **Developed by:** OpenAI, see [associated research paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and [GitHub repo](https://github.com/openai/gpt-2) for model developers. - **Model Type:** Transformer-based language model - **Language(s):** English - **License:** [Modified MIT License](https://github.com/openai/gpt-2/blob/master/LICENSE) - **Related Models:** [GPT-2](https://huggingface.co/gpt2), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-Large](https://huggingface.co/gpt2-large) - **Resources for more information:** - [Research Paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) - [OpenAI Blog Post](https://openai.com/blog/better-language-models/) - [GitHub Repo](https://github.com/openai/gpt-2) - [OpenAI Model Card for GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md) - [OpenAI GPT-2 1.5B Release Blog Post](https://openai.com/blog/gpt-2-1-5b-release/) - Test the full generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large ## How to Get Started with the Model Use the code below to get started with the model. You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python from transformers import pipeline, set_seed generator = pipeline('text-generation', model='gpt2-xl') set_seed(42) generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2-xl') model = GPT2Model.from_pretrained('gpt2-xl') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2-xl') model = TFGPT2Model.from_pretrained('gpt2-xl') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Uses #### Direct Use In their [model card about GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md), OpenAI wrote: > The primary intended users of these models are AI researchers and practitioners. > > We primarily imagine these language models will be used by researchers to better understand the behaviors, capabilities, biases, and constraints of large-scale generative language models. 
#### Downstream Use In their [model card about GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md), OpenAI wrote: > Here are some secondary use cases we believe are likely: > > - Writing assistance: Grammar assistance, autocompletion (for normal prose or code) > - Creative writing and art: exploring the generation of creative, fictional texts; aiding creation of poetry and other literary art. > - Entertainment: Creation of games, chat bots, and amusing generations. #### Misuse and Out-of-scope Use In their [model card about GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md), OpenAI wrote: > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases that require the generated text to be true. > > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar levels of caution around use cases that are sensitive to biases around human attributes. ## Risks, Limitations and Biases **CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propogate historical and current stereotypes.** #### Biases Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. Predictions generated by the model can include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. For example: ```python from transformers import pipeline, set_seed generator = pipeline('text-generation', model='gpt2-xl') set_seed(42) generator("The man worked as a", max_length=10, num_return_sequences=5) set_seed(42) generator("The woman worked as a", max_length=10, num_return_sequences=5) ``` This bias will also affect all fine-tuned versions of this model. Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. #### Risks and Limitations When they released the 1.5B parameter model, OpenAI wrote in a [blog post](https://openai.com/blog/gpt-2-1-5b-release/): > GPT-2 can be fine-tuned for misuse. Our partners at the Middlebury Institute of International Studies’ Center on Terrorism, Extremism, and Counterterrorism (CTEC) found that extremist groups can use GPT-2 for misuse, specifically by fine-tuning GPT-2 models on four ideological positions: white supremacy, Marxism, jihadist Islamism, and anarchism. CTEC demonstrated that it’s possible to create models that can generate synthetic propaganda for these ideologies. They also show that, despite having low detection accuracy on synthetic outputs, ML-based detection methods can give experts reasonable suspicion that an actor is generating synthetic text. The blog post further discusses the risks, limitations, and biases of the model. 
## Training #### Training Data The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weights 40GB of texts but has not been publicly released. You can find a list of the top 1,000 domains present in WebText [here](https://github.com/openai/gpt-2/blob/master/domains.txt). #### Training Procedure The model is pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens. ## Evaluation The following evaluation information is extracted from the [associated paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf). #### Testing Data, Factors and Metrics The model authors write in the [associated paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) that: > Since our model operates on a byte level and does not require lossy pre-processing or tokenization, we can evaluate it on any language model benchmark. Results on language modeling datasets are commonly reported in a quantity which is a scaled or ex- ponentiated version of the average negative log probability per canonical prediction unit - usually a character, a byte, or a word. We evaluate the same quantity by computing the log-probability of a dataset according to a WebText LM and dividing by the number of canonical units. For many of these datasets, WebText LMs would be tested significantly out- of-distribution, having to predict aggressively standardized text, tokenization artifacts such as disconnected punctuation and contractions, shuffled sentences, and even the string <UNK> which is extremely rare in WebText - occurring only 26 times in 40 billion bytes. We report our main results...using invertible de-tokenizers which remove as many of these tokenization / pre-processing artifacts as possible. Since these de-tokenizers are invertible, we can still calculate the log probability of a dataset and they can be thought of as a simple form of domain adaptation. 
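To make the evaluation quantity above concrete, the sketch below shows one way to compute the average per-token negative log-probability (and the corresponding perplexity) of a short text with this checkpoint using `transformers`. It is only an illustration and does not reproduce the paper's de-tokenized evaluation protocol; the example sentence is arbitrary.

```python
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2-xl")
model = GPT2LMHeadModel.from_pretrained("gpt2-xl")
model.eval()

text = "The quick brown fox jumps over the lazy dog."
enc = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    # When labels are supplied, the model returns the mean cross-entropy,
    # i.e. the average negative log-probability per predicted token.
    out = model(**enc, labels=enc["input_ids"])

print("avg NLL per token:", out.loss.item())
print("perplexity:", torch.exp(out.loss).item())
```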
#### Results The model achieves the following results without any fine-tuning (zero-shot): | Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW | |:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:| | (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) | | | 8.63 | 63.24 | 93.30 | 89.05 | 18.34 | 35.76 | 0.93 | 0.98 | 17.48 | 42.16 | ## Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware type and hours used are based on information provided by one of the model authors on [Reddit](https://bit.ly/2Tw1x4L). - **Hardware Type:** 32 TPUv3 chips - **Hours used:** 168 - **Cloud Provider:** Unknown - **Compute Region:** Unknown - **Carbon Emitted:** Unknown ## Technical Specifications See the [associated paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) for details on the modeling architecture, objective, and training details. ## Citation Information ```bibtex @article{radford2019language, title={Language models are unsupervised multitask learners}, author={Radford, Alec and Wu, Jeffrey and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya and others}, journal={OpenAI blog}, volume={1}, number={8}, pages={9}, year={2019} } ``` ## Model Card Authors This model card was written by the Hugging Face team.
Intel/dpt-large
Intel
"2024-02-24T11:22:17Z"
388,703
144
transformers
[ "transformers", "pytorch", "safetensors", "dpt", "depth-estimation", "vision", "arxiv:2103.13413", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
depth-estimation
"2022-03-02T23:29:05Z"
--- license: apache-2.0 tags: - vision - depth-estimation widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace model-index: - name: dpt-large results: - task: type: monocular-depth-estimation name: Monocular Depth Estimation dataset: type: MIX-6 name: MIX-6 metrics: - type: Zero-shot transfer value: 10.82 name: Zero-shot transfer config: Zero-shot transfer verified: false --- ## Model Details: DPT-Large (also known as MiDaS 3.0) Dense Prediction Transformer (DPT) model trained on 1.4 million images for monocular depth estimation. It was introduced in the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by Ranftl et al. (2021) and first released in [this repository](https://github.com/isl-org/DPT). DPT uses the Vision Transformer (ViT) as backbone and adds a neck + head on top for monocular depth estimation. ![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dpt_architecture.jpg) The model card has been written in combination by the Hugging Face team and Intel. | Model Detail | Description | | ----------- | ----------- | | Model Authors - Company | Intel | | Date | March 22, 2022 | | Version | 1 | | Type | Computer Vision - Monocular Depth Estimation | | Paper or Other Resources | [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) and [GitHub Repo](https://github.com/isl-org/DPT) | | License | Apache 2.0 | | Questions or Comments | [Community Tab](https://huggingface.co/Intel/dpt-large/discussions) and [Intel Developers Discord](https://discord.gg/rv2Gp55UJQ)| | Intended Use | Description | | ----------- | ----------- | | Primary intended uses | You can use the raw model for zero-shot monocular depth estimation. See the [model hub](https://huggingface.co/models?search=dpt) to look for fine-tuned versions on a task that interests you. | | Primary intended users | Anyone doing monocular depth estimation | | Out-of-scope uses | This model in most cases will need to be fine-tuned for your particular task. 
The model should not be used to intentionally create hostile or alienating environments for people.| ### How to use The easiest is leveraging the pipeline API: ``` from transformers import pipeline pipe = pipeline(task="depth-estimation", model="Intel/dpt-large") result = pipe(image) result["depth"] ``` In case you want to implement the entire logic yourself, here's how to do that for zero-shot depth estimation on an image: ```python from transformers import DPTImageProcessor, DPTForDepthEstimation import torch import numpy as np from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) processor = DPTImageProcessor.from_pretrained("Intel/dpt-large") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large") # prepare image for the model inputs = processor(images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # interpolate to original size prediction = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False, ) # visualize the prediction output = prediction.squeeze().cpu().numpy() formatted = (output * 255 / np.max(output)).astype("uint8") depth = Image.fromarray(formatted) ``` For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/dpt). | Factors | Description | | ----------- | ----------- | | Groups | Multiple datasets compiled together | | Instrumentation | - | | Environment | Inference completed on Intel Xeon Platinum 8280 CPU @ 2.70GHz with 8 physical cores and an NVIDIA RTX 2080 GPU. | | Card Prompts | Model deployment on alternate hardware and software will change model performance | | Metrics | Description | | ----------- | ----------- | | Model performance measures | Zero-shot Transfer | | Decision thresholds | - | | Approaches to uncertainty and variability | - | | Training and Evaluation Data | Description | | ----------- | ----------- | | Datasets | The dataset is called MIX 6, and contains around 1.4M images. The model was initialized with ImageNet-pretrained weights.| | Motivation | To build a robust monocular depth prediction network | | Preprocessing | "We resize the image such that the longer side is 384 pixels and train on random square crops of size 384. ... We perform random horizontal flips for data augmentation." See [Ranftl et al. (2021)](https://arxiv.org/abs/2103.13413) for more details. | ## Quantitative Analyses | Model | Training set | DIW WHDR | ETH3D AbsRel | Sintel AbsRel | KITTI δ>1.25 | NYU δ>1.25 | TUM δ>1.25 | | --- | --- | --- | --- | --- | --- | --- | --- | | DPT - Large | MIX 6 | 10.82 (-13.2%) | 0.089 (-31.2%) | 0.270 (-17.5%) | 8.46 (-64.6%) | 8.32 (-12.9%) | 9.97 (-30.3%) | | DPT - Hybrid | MIX 6 | 11.06 (-11.2%) | 0.093 (-27.6%) | 0.274 (-16.2%) | 11.56 (-51.6%) | 8.69 (-9.0%) | 10.89 (-23.2%) | | MiDaS | MIX 6 | 12.95 (+3.9%) | 0.116 (-10.5%) | 0.329 (+0.5%) | 16.08 (-32.7%) | 8.71 (-8.8%) | 12.51 (-12.5%) | MiDaS [30] | MIX 5 | 12.46 | 0.129 | 0.327 | 23.90 | 9.55 | 14.29 | | Li [22] | MD [22] | 23.15 | 0.181 | 0.385 | 36.29 | 27.52 | 29.54 | | Li [21] | MC [21] | 26.52 | 0.183 | 0.405 | 47.94 | 18.57 | 17.71 | | Wang [40] | WS [40] | 19.09 | 0.205 | 0.390 | 31.92 | 29.57 | 20.18 | | Xian [45] | RW [45] | 14.59 | 0.186 | 0.422 | 34.08 | 27.00 | 25.02 | | Casser [5] | CS [8] | 32.80 | 0.235 | 0.422 | 21.15 | 39.58 | 37.18 | Table 1. 
Comparison to the state of the art on monocular depth estimation. We evaluate zero-shot cross-dataset transfer according to the protocol defined in [30]. Relative performance is computed with respect to the original MiDaS model [30]. Lower is better for all metrics. ([Ranftl et al., 2021](https://arxiv.org/abs/2103.13413)) | Ethical Considerations | Description | | ----------- | ----------- | | Data | The training data come from multiple image datasets compiled together. | | Human life | The model is not intended to inform decisions central to human life or flourishing. It is an aggregated set of monocular depth image datasets. | | Mitigations | No additional risk mitigation strategies were considered during model development. | | Risks and harms | The extent of the risks involved by using the model remain unknown. | | Use cases | - | | Caveats and Recommendations | | ----------- | | Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. There are no additional caveats or recommendations for this model. | ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2103-13413, author = {Ren{\'{e}} Ranftl and Alexey Bochkovskiy and Vladlen Koltun}, title = {Vision Transformers for Dense Prediction}, journal = {CoRR}, volume = {abs/2103.13413}, year = {2021}, url = {https://arxiv.org/abs/2103.13413}, eprinttype = {arXiv}, eprint = {2103.13413}, timestamp = {Wed, 07 Apr 2021 15:31:46 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2103-13413.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
rizvandwiki/gender-classification
rizvandwiki
"2023-05-18T11:16:33Z"
387,128
14
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "vit", "image-classification", "huggingpics", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-classification
"2022-12-06T08:53:43Z"
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: gender-classification results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9244444370269775 --- # gender-classification Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### female ![female](images/female.jpg) #### male ![male](images/male.jpg)
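## Example usage

The card above does not include an inference snippet, so here is a minimal sketch using the `transformers` image-classification pipeline. The image path is a placeholder, and the scores in the comment are illustrative, not measured outputs.

```python
from transformers import pipeline

# Load the ViT-based classifier through the image-classification pipeline
classifier = pipeline("image-classification", model="rizvandwiki/gender-classification")

# "face.jpg" is a placeholder; a local path, URL, or PIL.Image all work
predictions = classifier("face.jpg")
print(predictions)
# e.g. [{'label': 'female', 'score': 0.98}, {'label': 'male', 'score': 0.02}]  (illustrative values)
```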
alisawuffles/roberta-large-wanli
alisawuffles
"2023-06-14T04:58:48Z"
383,681
6
transformers
[ "transformers", "pytorch", "safetensors", "roberta", "text-classification", "en", "dataset:alisawuffles/WANLI", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-30T20:00:10Z"
---
language:
- en
tags:
- text-classification
widget:
- text: "I almost forgot to eat lunch.</s></s>I didn't forget to eat lunch."
- text: "I almost forgot to eat lunch.</s></s>I forgot to eat lunch."
- text: "I ate lunch.</s></s>I almost forgot to eat lunch."
datasets:
- alisawuffles/WANLI
---

This is an off-the-shelf roberta-large model fine-tuned on WANLI, the Worker-AI Collaborative NLI dataset ([Liu et al., 2022](https://aclanthology.org/2022.findings-emnlp.508/)). It outperforms the `roberta-large-mnli` model on eight out-of-domain test sets, including by 11% on HANS and 9% on Adversarial NLI.

### How to use

```python
import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification

model = RobertaForSequenceClassification.from_pretrained('alisawuffles/roberta-large-wanli')
tokenizer = RobertaTokenizer.from_pretrained('alisawuffles/roberta-large-wanli')

x = tokenizer("I almost forgot to eat lunch.", "I didn't forget to eat lunch.", return_tensors='pt', max_length=128, truncation=True)
logits = model(**x).logits
probs = logits.softmax(dim=1).squeeze(0)
label_id = torch.argmax(probs).item()
prediction = model.config.id2label[label_id]
```

### Citation

```
@inproceedings{liu-etal-2022-wanli,
    title = "{WANLI}: Worker and {AI} Collaboration for Natural Language Inference Dataset Creation",
    author = "Liu, Alisa and Swayamdipta, Swabha and Smith, Noah A. and Choi, Yejin",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.508",
    pages = "6826--6847",
    abstract = "A recurring challenge of crowdsourcing NLP datasets at scale is that human writers often rely on repetitive patterns when crafting examples, leading to a lack of linguistic diversity. We introduce a novel approach for dataset creation based on worker and AI collaboration, which brings together the generative strength of language models and the evaluative strength of humans. Starting with an existing dataset, MultiNLI for natural language inference (NLI), our approach uses dataset cartography to automatically identify examples that demonstrate challenging reasoning patterns, and instructs GPT-3 to compose new examples with similar patterns. Machine generated examples are then automatically filtered, and finally revised and labeled by human crowdworkers. The resulting dataset, WANLI, consists of 107,885 NLI examples and presents unique empirical strengths over existing NLI datasets. Remarkably, training a model on WANLI improves performance on eight out-of-domain test sets we consider, including by 11{\%} on HANS and 9{\%} on Adversarial NLI, compared to training on the 4x larger MultiNLI. Moreover, it continues to be more effective than MultiNLI augmented with other NLI datasets. Our results demonstrate the promise of leveraging natural language generation techniques and re-imagining the role of humans in the dataset creation process.",
}
```
textattack/bert-base-uncased-yelp-polarity
textattack
"2021-05-20T07:49:07Z"
381,842
2
transformers
[ "transformers", "pytorch", "jax", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
## TextAttack Model Card This `bert-base-uncased` model was fine-tuned for sequence classification using TextAttack and the yelp_polarity dataset loaded using the `nlp` library. The model was fine-tuned for 5 epochs with a batch size of 16, a learning rate of 5e-05, and a maximum sequence length of 256. Since this was a classification task, the model was trained with a cross-entropy loss function. The best score the model achieved on this task was 0.9699473684210527, as measured by the eval set accuracy, found after 4 epochs. For more information, check out [TextAttack on Github](https://github.com/QData/TextAttack).
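## Example usage

Since the card does not show how to run the fine-tuned model, here is a minimal sketch using `transformers`. The example sentence is arbitrary, and the index-to-label mapping (commonly 0 = negative, 1 = positive for `yelp_polarity`) should be confirmed against `model.config.id2label`.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-yelp-polarity")
model = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-yelp-polarity")

inputs = tokenizer(
    "The food was fantastic and the staff were very friendly.",
    return_tensors="pt",
    truncation=True,
    max_length=256,  # matches the maximum sequence length used for fine-tuning
)

with torch.no_grad():
    logits = model(**inputs).logits

predicted_class = logits.argmax(dim=-1).item()
print(predicted_class, model.config.id2label[predicted_class])
```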
WinKawaks/vit-small-patch16-224
WinKawaks
"2023-03-18T22:00:21Z"
369,226
9
transformers
[ "transformers", "pytorch", "safetensors", "vit", "image-classification", "vision", "dataset:imagenet", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2022-03-02T23:29:05Z"
---
license: apache-2.0
tags:
- vision
- image-classification
datasets:
- imagenet
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
  example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
  example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
  example_title: Palace
---

Google didn't publish vit-tiny and vit-small model checkpoints on Hugging Face. I converted the weights from the [timm repository](https://github.com/rwightman/pytorch-image-models). This model is used in the same way as [ViT-base](https://huggingface.co/google/vit-base-patch16-224); a usage sketch adapted from that card is included below.

Note that the safetensors weights require a torch 2.0 environment.
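A minimal sketch, adapted from the ViT-base card with this checkpoint swapped in (the COCO image URL is just a convenient test image):

```python
import requests
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = ViTImageProcessor.from_pretrained("WinKawaks/vit-small-patch16-224")
model = ViTForImageClassification.from_pretrained("WinKawaks/vit-small-patch16-224")

inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits

# The model predicts one of the 1000 ImageNet classes
predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```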
google/owlv2-base-patch16-ensemble
google
"2024-03-17T12:53:36Z"
368,905
41
transformers
[ "transformers", "pytorch", "safetensors", "owlv2", "zero-shot-object-detection", "vision", "arxiv:2306.09683", "license:apache-2.0", "has_space", "region:us" ]
zero-shot-object-detection
"2023-10-13T09:27:09Z"
--- license: apache-2.0 tags: - vision - zero-shot-object-detection inference: false --- # Model Card: OWLv2 ## Model Details The OWLv2 model (short for Open-World Localization) was proposed in [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. OWLv2, like OWL-ViT, is a zero-shot text-conditioned object detection model that can be used to query an image with one or multiple text queries. The model uses CLIP as its multi-modal backbone, with a ViT-like Transformer to get visual features and a causal language model to get the text features. To use CLIP for detection, OWL-ViT removes the final token pooling layer of the vision model and attaches a lightweight classification and box head to each transformer output token. Open-vocabulary classification is enabled by replacing the fixed classification layer weights with the class-name embeddings obtained from the text model. The authors first train CLIP from scratch and fine-tune it end-to-end with the classification and box heads on standard detection datasets using a bipartite matching loss. One or multiple text queries per image can be used to perform zero-shot text-conditioned object detection. ### Model Date June 2023 ### Model Type The model uses a CLIP backbone with a ViT-B/16 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. The CLIP backbone is trained from scratch and fine-tuned together with the box and class prediction heads with an object detection objective. ### Documents - [OWLv2 Paper](https://arxiv.org/abs/2306.09683) ### Use with Transformers ```python import requests from PIL import Image import torch from transformers import Owlv2Processor, Owlv2ForObjectDetection processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble") model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) texts = [["a photo of a cat", "a photo of a dog"]] inputs = processor(text=texts, images=image, return_tensors="pt") outputs = model(**inputs) # Target image sizes (height, width) to rescale box predictions [batch_size, 2] target_sizes = torch.Tensor([image.size[::-1]]) # Convert outputs (bounding boxes and class logits) to COCO API results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes) i = 0 # Retrieve predictions for the first image for the corresponding text queries text = texts[i] boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"] # Print detected objects and rescaled box coordinates for box, score, label in zip(boxes, scores, labels): box = [round(i, 2) for i in box.tolist()] print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}") ``` ## Model Use ### Intended Use The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, text-conditioned object detection. We also hope it can be used for interdisciplinary studies of the potential impact of such models, especially in areas that commonly require identifying objects whose label is unavailable during training. 
#### Primary intended uses The primary intended users of these models are AI researchers. We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. ## Data The CLIP backbone of the model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet. The prediction heads of OWL-ViT, along with the CLIP backbone, are fine-tuned on publicly available object detection datasets such as [COCO](https://cocodataset.org/#home) and [OpenImages](https://storage.googleapis.com/openimages/web/index.html). (to be updated for v2) ### BibTeX entry and citation info ```bibtex @misc{minderer2023scaling, title={Scaling Open-Vocabulary Object Detection}, author={Matthias Minderer and Alexey Gritsenko and Neil Houlsby}, year={2023}, eprint={2306.09683}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
SG161222/Realistic_Vision_V5.1_noVAE
SG161222
"2024-04-12T15:39:27Z"
365,572
142
diffusers
[ "diffusers", "safetensors", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
null
"2023-07-31T05:20:51Z"
--- license: creativeml-openrail-m --- <b>This model is available on <a href="https://www.mage.space/">Mage.Space</a> (main sponsor)</b><br> <b>You can support me directly on Boosty - https://boosty.to/sg_161222</b><br> <b>Please read this!</b><br> For version 5.1 it is recommended to use with VAE (to improve generation quality and get rid of artifacts): https://huggingface.co/stabilityai/sd-vae-ft-mse-original<br> <hr/> <b>The recommended negative prompt:</b> (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck<br> <b>OR</b><br> (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation <b>Euler A or DPM++ 2M Karras<br> CFG Scale 3,5 - 7<br> Hires. fix with 4x-UltraSharp upscaler<br> 0 Hires steps and Denoising strength 0.25-0.7<br> Upscale by 1.1-2.0</b>
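<b>Example with diffusers (sketch)</b><br>
As a rough illustration of the recommendations above, the sketch below loads the model with `diffusers` and swaps in the fine-tuned MSE VAE. The diffusers-format VAE repo id (`stabilityai/sd-vae-ft-mse`), the scheduler choice, and the prompt are assumptions made for this example, not settings prescribed by the author.

```python
import torch
from diffusers import StableDiffusionPipeline, AutoencoderKL, DPMSolverMultistepScheduler

# Assumed diffusers-format counterpart of the recommended sd-vae-ft-mse-original checkpoint
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

pipe = StableDiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

# Roughly corresponds to the "DPM++ 2M Karras" suggestion above
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)

prompt = "RAW photo, close-up portrait of a man, natural lighting, 8k uhd, film grain"  # example prompt
negative_prompt = (
    "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, "
    "cartoon, drawing, anime:1.4), text, worst quality, low quality, jpeg artifacts"
)

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    guidance_scale=5.0,          # within the recommended 3.5-7 range
    num_inference_steps=30,
).images[0]
image.save("realistic_vision_example.png")
```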
nvidia/speakerverification_en_titanet_large
nvidia
"2023-11-14T16:58:18Z"
364,951
52
nemo
[ "nemo", "speaker", "speech", "audio", "speaker-verification", "speaker-recognition", "speaker-diarization", "titanet", "NeMo", "pytorch", "en", "dataset:VOXCELEB-1", "dataset:VOXCELEB-2", "dataset:FISHER", "dataset:switchboard", "dataset:librispeech_asr", "dataset:SRE", "license:cc-by-4.0", "model-index", "has_space", "region:us" ]
null
"2022-07-15T00:26:00Z"
--- language: - en library_name: nemo datasets: - VOXCELEB-1 - VOXCELEB-2 - FISHER - switchboard - librispeech_asr - SRE thumbnail: null tags: - speaker - speech - audio - speaker-verification - speaker-recognition - speaker-diarization - titanet - NeMo - pytorch license: cc-by-4.0 widget: - src: https://huggingface.co/nvidia/speakerverification_en_titanet_large/resolve/main/an255-fash-b.wav example_title: Speech sample 1 - src: https://huggingface.co/nvidia/speakerverification_en_titanet_large/resolve/main/cen7-fash-b.wav example_title: Speech sample 2 model-index: - name: speakerverification_en_titanet_large results: - task: name: Speaker Verification type: speaker-verification dataset: name: voxceleb1 type: voxceleb1-O config: clean split: test args: language: en metrics: - name: Test EER type: eer value: 0.66 - task: type: Speaker Diarization name: speaker-diarization dataset: name: ami-mixheadset type: ami_diarization config: oracle-vad-known-number-of-speakers split: test args: language: en metrics: - name: Test DER type: der value: 1.73 - task: type: Speaker Diarization name: speaker-diarization dataset: name: ami-lapel type: ami_diarization config: oracle-vad-known-number-of-speakers split: test args: language: en metrics: - name: Test DER type: der value: 2.03 - task: type: Speaker Diarization name: speaker-diarization dataset: name: ch109 type: callhome_diarization config: oracle-vad-known-number-of-speakers split: test args: language: en metrics: - name: Test DER type: der value: 1.19 - task: type: Speaker Diarization name: speaker-diarization dataset: name: nist-sre-2000 type: nist-sre_diarization config: oracle-vad-known-number-of-speakers split: test args: language: en metrics: - name: Test DER type: der value: 6.73 --- # NVIDIA TitaNet-Large (en-US) <style> img { display: inline; } </style> | [![Model architecture](https://img.shields.io/badge/Model_Arch-TitaNet--Large-lightgrey#model-badge)](#model-architecture) | [![Model size](https://img.shields.io/badge/Params-23M-lightgrey#model-badge)](#model-architecture) | [![Language](https://img.shields.io/badge/Language-en--US-lightgrey#model-badge)](#datasets) This model extracts speaker embeddings from given speech, which is the backbone for speaker verification and diarization tasks. It is a "large" version of TitaNet (around 23M parameters) models. See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speaker_recognition/models.html#titanet) for complete architecture details. ## NVIDIA NeMo: Training To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed the latest Pytorch version. ``` pip install nemo_toolkit['all'] ``` ## How to Use this Model The model is available for use in the NeMo toolkit [3] and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset. 
### Automatically instantiate the model ```python import nemo.collections.asr as nemo_asr speaker_model = nemo_asr.models.EncDecSpeakerLabelModel.from_pretrained("nvidia/speakerverification_en_titanet_large") ``` ### Embedding Extraction Using ```python emb = speaker_model.get_embedding("an255-fash-b.wav") ``` ### Verifying two utterances (Speaker Verification) Now to check if two audio files are from the same speaker or not, simply do: ```python speaker_model.verify_speakers("an255-fash-b.wav","cen7-fash-b.wav") ``` ### Extracting Embeddings for more audio files To extract embeddings from a bunch of audio files: Write audio files to a `manifest.json` file with lines as in format: ```json {"audio_filepath": "<absolute path to dataset>/audio_file.wav", "duration": "duration of file in sec", "label": "speaker_id"} ``` Then running following script will extract embeddings and writes to current working directory: ```shell python <NeMo_root>/examples/speaker_tasks/recognition/extract_speaker_embeddings.py --manifest=manifest.json ``` ### Input This model accepts 16000 KHz Mono-channel Audio (wav files) as input. ### Output This model provides speaker embeddings for an audio file. ## Model Architecture TitaNet model is a depth-wise separable conv1D model [1] for Speaker Verification and diarization tasks. You may find more info on the detail of this model here: [TitaNet-Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/models.html). ## Training The NeMo toolkit [3] was used for training the models for over several hundred epochs. These model are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/recognition/speaker_reco.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/recognition/conf/titanet-large.yaml). ### Datasets All the models in this collection are trained on a composite dataset comprising several thousand hours of English speech: - Voxceleb-1 - Voxceleb-2 - Fisher - Switchboard - Librispeech - SRE (2004-2010) ## Performance Performances of the these models are reported in terms of Equal Error Rate (EER%) on speaker verification evaluation trial files and as Diarization Error Rate (DER%) on diarization test sessions. * Speaker Verification (EER%) | Version | Model | Model Size | VoxCeleb1 (Cleaned trial file) | |---------|--------------|-----|---------------| | 1.10.0 | TitaNet-Large | 23M | 0.66 | * Speaker Diarization (DER%) | Version | Model | Model Size | Evaluation Condition | NIST SRE 2000 | AMI (Lapel) | AMI (MixHeadset) | CH109 | |---------|--------------|-----|----------------------|---------------|-------------|------------------|-------| | 1.10.0 | TitaNet-Large | 23M | Oracle VAD KNOWN # of Speakers | 6.73 | 2.03 | 1.73 | 1.19 | | 1.10.0 | TitaNet-Large | 23M | Oracle VAD UNKNOWN # of Speakers | 5.38 | 2.03 | 1.89 | 1.63 | ## Limitations This model is trained on both telephonic and non-telephonic speech from voxceleb datasets, Fisher and switch board. If your domain of data differs from trained data or doesnot show relatively good performance consider finetuning for that speech domain. ## NVIDIA Riva: Deployment [NVIDIA Riva](https://developer.nvidia.com/riva), is an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, on edge, and embedded. 
Additionally, Riva provides: * World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours * Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization * Streaming speech recognition, Kubernetes compatible scaling, and enterprise-grade support Although this model isn’t supported yet by Riva, the [list of supported models is here](https://huggingface.co/models?other=Riva). Check out [Riva live demo](https://developer.nvidia.com/riva#demos). ## References [1] [TitaNet: Neural Model for Speaker Representation with 1D Depth-wise Separable convolutions and global context](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9746806) [2] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo) ## Licence License to use this model is covered by the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/). By downloading the public and release version of the model, you accept the terms and conditions of the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) license.
fabiochiu/t5-base-tag-generation
fabiochiu
"2023-08-03T07:55:12Z"
362,590
42
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-05-19T08:45:13Z"
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-base-tag-generation results: [] widget: - text: "Python is a high-level, interpreted, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically-typed and garbage-collected." example_title: "Programming" --- # Model description This model is [t5-base](https://huggingface.co/t5-base) fine-tuned on the [190k Medium Articles](https://www.kaggle.com/datasets/fabiochiusano/medium-articles) dataset for predicting article tags using the article textual content as input. While usually formulated as a multi-label classification problem, this model deals with _tag generation_ as a text2text generation task (inspiration from [text2tags](https://huggingface.co/efederici/text2tags)). # How to use the model ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM import nltk nltk.download('punkt') tokenizer = AutoTokenizer.from_pretrained("fabiochiu/t5-base-tag-generation") model = AutoModelForSeq2SeqLM.from_pretrained("fabiochiu/t5-base-tag-generation") text = """ Python is a high-level, interpreted, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically-typed and garbage-collected. """ inputs = tokenizer([text], max_length=512, truncation=True, return_tensors="pt") output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=10, max_length=64) decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0] tags = list(set(decoded_output.strip().split(", "))) print(tags) # ['Programming', 'Code', 'Software Development', 'Programming Languages', # 'Software', 'Developer', 'Python', 'Software Engineering', 'Science', # 'Engineering', 'Technology', 'Computer Science', 'Coding', 'Digital', 'Tech', # 'Python Programming'] ``` ## Data cleaning The dataset is composed of Medium articles and their tags. However, each Medium article can have at most five tags, therefore the author needs to choose what he/she believes are the best tags (mainly for SEO-related purposes). This means that an article with the "Python" tag may have not the "Programming Languages" tag, even though the first implies the latter. To clean the dataset accounting for this problem, a hand-made taxonomy of about 1000 tags was built. Using the taxonomy, the tags of each articles have been augmented (e.g. an article with the "Python" tag will have the "Programming Languages" tag as well, as the taxonomy says that "Python" is part of "Programming Languages"). The taxonomy is not public, if you are interested in it please send an email at chiusanofabio94@gmail.com. ## Training and evaluation data The model has been trained on a single epoch spanning about 50000 articles, evaluating on 1000 random articles not used during training. ## Evaluation results - eval_loss: 0.8474 - eval_rouge1: 38.6033 - eval_rouge2: 20.5952 - eval_rougeL: 36.4458 - eval_rougeLsum: 36.3202 - eval_gen_len: 15.257 # average number of generated tokens ## Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
albert/albert-large-v2
albert
"2024-02-19T10:58:48Z"
361,858
13
transformers
[ "transformers", "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2022-03-02T23:29:04Z"
--- language: en license: apache-2.0 datasets: - bookcorpus - wikipedia --- # ALBERT Large v2 Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1909.11942) and first released in [this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not make a difference between english and English. Disclaimer: The team releasing ALBERT did not write a model card for this model, so this model card has been written by the Hugging Face team. ## Model description ALBERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Sentence Ordering Prediction (SOP): ALBERT uses a pretraining loss based on predicting the ordering of two consecutive segments of text. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the ALBERT model as inputs. ALBERT is particular in that it shares its layers across its Transformer. Therefore, all layers have the same weights. Using repeating layers results in a small memory footprint; however, the computational cost remains similar to a BERT-like architecture with the same number of hidden layers, as it has to iterate through the same number of (repeating) layers. This is the second version of the large model. Version 2 is different from version 1 due to different dropout rates, additional training data, and longer training. It has better results in nearly all downstream tasks. This model has the following configuration: - 24 repeating layers - 128 embedding dimension - 1024 hidden dimension - 16 attention heads - 17M parameters ## Intended uses & limitations You can use the raw model for either masked language modeling or sentence order prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=albert) to look for fine-tuned versions on a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at models like GPT2.
### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='albert-large-v2') >>> unmasker("Hello I'm a [MASK] model.") [ { "sequence":"[CLS] hello i'm a modeling model.[SEP]", "score":0.05816134437918663, "token":12807, "token_str":"▁modeling" }, { "sequence":"[CLS] hello i'm a modelling model.[SEP]", "score":0.03748830780386925, "token":23089, "token_str":"▁modelling" }, { "sequence":"[CLS] hello i'm a model model.[SEP]", "score":0.033725276589393616, "token":1061, "token_str":"▁model" }, { "sequence":"[CLS] hello i'm a runway model.[SEP]", "score":0.017313428223133087, "token":8014, "token_str":"▁runway" }, { "sequence":"[CLS] hello i'm a lingerie model.[SEP]", "score":0.014405295252799988, "token":29104, "token_str":"▁lingerie" } ] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import AlbertTokenizer, AlbertModel tokenizer = AlbertTokenizer.from_pretrained('albert-large-v2') model = AlbertModel.from_pretrained("albert-large-v2") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import AlbertTokenizer, TFAlbertModel tokenizer = AlbertTokenizer.from_pretrained('albert-large-v2') model = TFAlbertModel.from_pretrained("albert-large-v2") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='albert-large-v2') >>> unmasker("The man worked as a [MASK].") [ { "sequence":"[CLS] the man worked as a chauffeur.[SEP]", "score":0.029577180743217468, "token":28744, "token_str":"▁chauffeur" }, { "sequence":"[CLS] the man worked as a janitor.[SEP]", "score":0.028865724802017212, "token":29477, "token_str":"▁janitor" }, { "sequence":"[CLS] the man worked as a shoemaker.[SEP]", "score":0.02581118606030941, "token":29024, "token_str":"▁shoemaker" }, { "sequence":"[CLS] the man worked as a blacksmith.[SEP]", "score":0.01849772222340107, "token":21238, "token_str":"▁blacksmith" }, { "sequence":"[CLS] the man worked as a lawyer.[SEP]", "score":0.01820771023631096, "token":3672, "token_str":"▁lawyer" } ] >>> unmasker("The woman worked as a [MASK].") [ { "sequence":"[CLS] the woman worked as a receptionist.[SEP]", "score":0.04604868218302727, "token":25331, "token_str":"▁receptionist" }, { "sequence":"[CLS] the woman worked as a janitor.[SEP]", "score":0.028220869600772858, "token":29477, "token_str":"▁janitor" }, { "sequence":"[CLS] the woman worked as a paramedic.[SEP]", "score":0.0261906236410141, "token":23386, "token_str":"▁paramedic" }, { "sequence":"[CLS] the woman worked as a chauffeur.[SEP]", "score":0.024797942489385605, "token":28744, "token_str":"▁chauffeur" }, { "sequence":"[CLS] the woman worked as a waitress.[SEP]", "score":0.024124596267938614, "token":13678, "token_str":"▁waitress" } ] ``` This bias will also affect all fine-tuned versions of this model. 
## Training data The ALBERT model was pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and headers). ## Training procedure ### Preprocessing The texts are lowercased and tokenized using SentencePiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence A [SEP] Sentence B [SEP] ``` ### Training The ALBERT procedure follows the BERT setup. The details of the masking procedure for each sentence are the following: - 15% of the tokens are masked. - In 80% of the cases, the masked tokens are replaced by `[MASK]`. - In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace. - In the 10% remaining cases, the masked tokens are left as is. ## Evaluation results When fine-tuned on downstream tasks, the ALBERT models achieve the following results: | | Average | SQuAD1.1 | SQuAD2.0 | MNLI | SST-2 | RACE | |----------------|----------|----------|----------|----------|----------|----------| |V2 | |ALBERT-base |82.3 |90.2/83.2 |82.1/79.3 |84.6 |92.9 |66.8 | |ALBERT-large |85.7 |91.8/85.2 |84.9/81.8 |86.5 |94.9 |75.2 | |ALBERT-xlarge |87.9 |92.9/86.4 |87.9/84.1 |87.9 |95.4 |80.7 | |ALBERT-xxlarge |90.9 |94.6/89.1 |89.8/86.9 |90.6 |96.8 |86.8 | |V1 | |ALBERT-base |80.1 |89.3/82.3 | 80.0/77.1|81.6 |90.3 | 64.0 | |ALBERT-large |82.4 |90.6/83.9 | 82.3/79.4|83.5 |91.7 | 68.5 | |ALBERT-xlarge |85.5 |92.5/86.1 | 86.1/83.1|86.4 |92.4 | 74.8 | |ALBERT-xxlarge |91.0 |94.8/89.3 | 90.2/87.4|90.8 |96.9 | 86.5 | ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-1909-11942, author = {Zhenzhong Lan and Mingda Chen and Sebastian Goodman and Kevin Gimpel and Piyush Sharma and Radu Soricut}, title = {{ALBERT:} {A} Lite {BERT} for Self-supervised Learning of Language Representations}, journal = {CoRR}, volume = {abs/1909.11942}, year = {2019}, url = {http://arxiv.org/abs/1909.11942}, archivePrefix = {arXiv}, eprint = {1909.11942}, timestamp = {Fri, 27 Sep 2019 13:04:21 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1909-11942.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
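As a rough illustration of the masking procedure described in the Training section above (15% of tokens selected; of those, 80% replaced by `[MASK]`, 10% by a random token, 10% left unchanged), here is a hedged, self-contained sketch. It ignores special tokens and other implementation details, so it is not the actual pretraining code.

```python
import random

def mask_tokens(token_ids, mask_token_id, vocab_size, mlm_prob=0.15):
    """Toy BERT/ALBERT-style MLM masking (special-token handling omitted)."""
    masked = list(token_ids)
    labels = [-100] * len(token_ids)  # positions with -100 are ignored by the loss
    for i, tok in enumerate(token_ids):
        if random.random() < mlm_prob:
            labels[i] = tok                 # the model must predict the original token
            r = random.random()
            if r < 0.8:                     # 80%: replace with [MASK]
                masked[i] = mask_token_id
            elif r < 0.9:                   # 10%: replace with a random token
                masked[i] = random.randrange(vocab_size)
            # remaining 10%: keep the original token unchanged
    return masked, labels

masked, labels = mask_tokens([7, 42, 99, 13, 5], mask_token_id=4, vocab_size=30000)
print(masked, labels)
```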
stabilityai/stable-diffusion-2-base
stabilityai
"2023-07-05T16:19:03Z"
361,311
324
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "text-to-image", "arxiv:2112.10752", "arxiv:2202.00512", "arxiv:1910.09700", "license:openrail++", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2022-11-23T17:41:31Z"
--- license: openrail++ tags: - stable-diffusion - text-to-image --- # Stable Diffusion v2-base Model Card This model card focuses on the model associated with the Stable Diffusion v2-base model, available [here](https://github.com/Stability-AI/stablediffusion). The model is trained from scratch 550k steps at resolution `256x256` on a subset of [LAION-5B](https://laion.ai/blog/laion-5b/) filtered for explicit pornographic material, using the [LAION-NSFW classifier](https://github.com/LAION-AI/CLIP-based-NSFW-Detector) with `punsafe=0.1` and an [aesthetic score](https://github.com/christophschuhmann/improved-aesthetic-predictor) >= `4.5`. Then it is further trained for 850k steps at resolution `512x512` on the same dataset on images with resolution `>= 512x512`. ![image](https://github.com/Stability-AI/stablediffusion/blob/main/assets/stable-samples/txt2img/merged-0003.png?raw=true) - Use it with the [`stablediffusion`](https://github.com/Stability-AI/stablediffusion) repository: download the `512-base-ema.ckpt` [here](https://huggingface.co/stabilityai/stable-diffusion-2-base/resolve/main/512-base-ema.ckpt). - Use it with 🧨 [`diffusers`](https://huggingface.co/stabilityai/stable-diffusion-2-base#examples) ## Model Details - **Developed by:** Robin Rombach, Patrick Esser - **Model type:** Diffusion-based text-to-image generation model - **Language(s):** English - **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL) - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([OpenCLIP-ViT/H](https://github.com/mlfoundations/open_clip)). - **Resources for more information:** [GitHub Repository](https://github.com/Stability-AI/). - **Cite as:** @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } ## Examples Using the [🤗's Diffusers library](https://github.com/huggingface/diffusers) to run Stable Diffusion 2 in a simple and efficient manner. 
```bash pip install diffusers transformers accelerate scipy safetensors ``` Running the pipeline (if you don't swap the scheduler it will run with the default PNDM/PLMS scheduler, in this example we are swapping it to EulerDiscreteScheduler): ```python from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler import torch model_id = "stabilityai/stable-diffusion-2-base" # Use the Euler scheduler here instead scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] image.save("astronaut_rides_horse.png") ``` **Notes**: - Despite not being a dependency, we highly recommend you to install [xformers](https://github.com/facebookresearch/xformers) for memory efficient attention (better performance) - If you have low GPU RAM available, make sure to add a `pipe.enable_attention_slicing()` after sending it to `cuda` for less VRAM usage (to the cost of speed) # Uses ## Direct Use The model is intended for research purposes only. Possible research areas and tasks include - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. - Research on generative models. Excluded uses are described below. ### Misuse, Malicious Use, and Out-of-Scope Use _Note: This section is originally taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), was used for Stable Diffusion v1, but applies in the same way to Stable Diffusion v2_. The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. #### Out-of-Scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. #### Misuse and Malicious Use Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc. - Intentionally promoting or propagating discriminatory content or harmful stereotypes. - Impersonating individuals without their consent. - Sexual content without consent of the people who might see it. - Mis- and disinformation - Representations of egregious violence and gore - Sharing of copyrighted or licensed material in violation of its terms of use. - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use. ## Limitations and Bias ### Limitations - The model does not achieve perfect photorealism - The model cannot render legible text - The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere” - Faces and people in general may not be generated properly. 
- The model was trained mainly with English captions and will not work as well in other languages. - The autoencoding part of the model is lossy - The model was trained on a subset of the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent and sexual content. To partially mitigate this, we have filtered the dataset using LAION's NSFW detector (see Training section). ### Bias While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. Stable Diffusion v2 was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are limited to English descriptions. Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. This affects the overall output of the model, as white and western cultures are often set as the default. Further, the ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts. Stable Diffusion v2 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent. ## Training **Training Data** The model developers used the following dataset for training the model: - LAION-5B and subsets (details below). The training data is further filtered using LAION's NSFW detector, with a "p_unsafe" score of 0.1 (conservative). For more details, please refer to LAION-5B's [NeurIPS 2022](https://openreview.net/forum?id=M3Y74vmsMcY) paper and reviewer discussions on the topic. **Training Procedure** Stable Diffusion v2 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training, - Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 - Text prompts are encoded through the OpenCLIP-ViT/H text-encoder. - The output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention. - The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. We also use the so-called _v-objective_, see https://arxiv.org/abs/2202.00512 (a schematic sketch of this target is shown at the end of this card). We currently provide the following checkpoints: - `512-base-ema.ckpt`: 550k steps at resolution `256x256` on a subset of [LAION-5B](https://laion.ai/blog/laion-5b/) filtered for explicit pornographic material, using the [LAION-NSFW classifier](https://github.com/LAION-AI/CLIP-based-NSFW-Detector) with `punsafe=0.1` and an [aesthetic score](https://github.com/christophschuhmann/improved-aesthetic-predictor) >= `4.5`. 850k steps at resolution `512x512` on the same dataset with resolution `>= 512x512`. - `768-v-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for 150k steps using a [v-objective](https://arxiv.org/abs/2202.00512) on the same dataset. Resumed for another 140k steps on a `768x768` subset of our dataset. - `512-depth-ema.ckpt`: Resumed from `512-base-ema.ckpt` and finetuned for 200k steps. Added an extra input channel to process the (relative) depth prediction produced by [MiDaS](https://github.com/isl-org/MiDaS) (`dpt_hybrid`) which is used as an additional conditioning. The additional input channels of the U-Net which process this extra information were zero-initialized.
- `512-inpainting-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for another 200k steps. Follows the mask-generation strategy presented in [LAMA](https://github.com/saic-mdal/lama) which, in combination with the latent VAE representations of the masked image, are used as an additional conditioning. The additional input channels of the U-Net which process this extra information were zero-initialized. The same strategy was used to train the [1.5-inpainting checkpoint](https://github.com/saic-mdal/lama). - `x4-upscaling-ema.ckpt`: Trained for 1.25M steps on a 10M subset of LAION containing images `>2048x2048`. The model was trained on crops of size `512x512` and is a text-guided [latent upscaling diffusion model](https://arxiv.org/abs/2112.10752). In addition to the textual input, it receives a `noise_level` as an input parameter, which can be used to add noise to the low-resolution input according to a [predefined diffusion schedule](configs/stable-diffusion/x4-upscaling.yaml). - **Hardware:** 32 x 8 x A100 GPUs - **Optimizer:** AdamW - **Gradient Accumulations**: 1 - **Batch:** 32 x 8 x 2 x 4 = 2048 - **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant ## Evaluation Results Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) and 50 steps DDIM sampling steps show the relative improvements of the checkpoints: ![pareto](model-variants.jpg) Evaluated using 50 DDIM steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores. ## Environmental Impact **Stable Diffusion v1** **Estimated Emissions** Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact. - **Hardware Type:** A100 PCIe 40GB - **Hours used:** 200000 - **Cloud Provider:** AWS - **Compute Region:** US-east - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 15000 kg CO2 eq. ## Citation @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } *This model card was written by: Robin Rombach, Patrick Esser and David Ha and is based on the [Stable Diffusion v1](https://github.com/CompVis/stable-diffusion/blob/main/Stable_Diffusion_v1_Model_Card.md) and [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
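For readers unfamiliar with the _v-objective_ mentioned in the Training Procedure above, here is a hedged, schematic illustration of how the v-prediction target is formed from the clean latent and the sampled noise, following https://arxiv.org/abs/2202.00512. Tensor shapes and the noise schedule are illustrative, not the actual training configuration.

```python
import torch

def v_target(latents, noise, alphas_cumprod, t):
    """v-parameterization target: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x0."""
    a = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)           # sqrt(alpha_bar_t)
    s = (1.0 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)   # sqrt(1 - alpha_bar_t)
    return a * noise - s * latents

# Toy example: a batch of 2 latents of shape 4 x 64 x 64 (512x512 images with f=8)
latents = torch.randn(2, 4, 64, 64)
noise = torch.randn_like(latents)
alphas_cumprod = torch.linspace(0.9999, 0.0001, 1000)        # illustrative schedule
t = torch.randint(0, 1000, (2,))
target = v_target(latents, noise, alphas_cumprod, t)
print(target.shape)  # torch.Size([2, 4, 64, 64])
```

The v-objective checkpoints (e.g. `768-v-ema.ckpt`) are trained with a mean-squared error against such a target, while the base checkpoint uses the plain noise-prediction objective described above.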
duongttr/job-candidiate-matching-sentbert
duongttr
"2023-05-05T14:47:14Z"
360,945
3
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "endpoints_compatible", "region:us" ]
sentence-similarity
"2023-05-05T14:43:32Z"
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # duongttr/job-candidiate-matching-sentbert This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('duongttr/job-candidiate-matching-sentbert') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=duongttr/job-candidiate-matching-sentbert) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 13952 with parameters: ``` {'batch_size': 14, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.ContrastiveLoss.ContrastiveLoss` with parameters: ``` {'distance_metric': 'SiameseDistanceMetric.COSINE_DISTANCE', 'margin': 0.5, 'size_average': True} ``` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 2000, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 384, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
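Given the repository name (job-candidate matching), a natural usage pattern is to score a job description against candidate profiles with cosine similarity. The sketch below is a hedged example; the texts are invented and only the model id comes from this repository.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('duongttr/job-candidiate-matching-sentbert')

job = "Senior Python developer with experience in NLP and transformer models."
candidates = [
    "5 years of Python, built NLP pipelines with Hugging Face transformers.",
    "Front-end engineer focused on React and TypeScript.",
]

job_emb = model.encode(job, convert_to_tensor=True)
cand_emb = model.encode(candidates, convert_to_tensor=True)

scores = util.cos_sim(job_emb, cand_emb)  # shape (1, num_candidates)
print(scores)  # higher score = closer match
```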
siebert/sentiment-roberta-large-english
siebert
"2023-04-02T16:25:45Z"
360,034
99
transformers
[ "transformers", "pytorch", "tf", "jax", "roberta", "text-classification", "sentiment", "twitter", "reviews", "siebert", "en", "arxiv:1907.11692", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: "en" tags: - sentiment - twitter - reviews - siebert --- ## SiEBERT - English-Language Sentiment Classification # Overview This model ("SiEBERT", prefix for "Sentiment in English") is a fine-tuned checkpoint of [RoBERTa-large](https://huggingface.co/roberta-large) ([Liu et al. 2019](https://arxiv.org/pdf/1907.11692.pdf)). It enables reliable binary sentiment analysis for various types of English-language text. For each instance, it predicts either positive (1) or negative (0) sentiment. The model was fine-tuned and evaluated on 15 data sets from diverse text sources to enhance generalization across different types of texts (reviews, tweets, etc.). Consequently, it outperforms models trained on only one type of text (e.g., movie reviews from the popular SST-2 benchmark) when used on new data as shown below. # Predictions on a data set If you want to predict sentiment for your own data, we provide an example script via [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb). You can load your data to a Google Drive and run the script for free on a Colab GPU. Set-up only takes a few minutes. We suggest that you manually label a subset of your data to evaluate performance for your use case. For performance benchmark values across various sentiment analysis contexts, please refer to our paper ([Hartmann et al. 2022](https://www.sciencedirect.com/science/article/pii/S0167811622000477?via%3Dihub)). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/chrsiebert/sentiment-roberta-large-english/blob/main/sentiment_roberta_prediction_example.ipynb) # Use in a Hugging Face pipeline The easiest way to use the model for single predictions is Hugging Face's [sentiment analysis pipeline](https://huggingface.co/transformers/quicktour.html#getting-started-on-a-task-with-a-pipeline), which only needs a couple lines of code as shown in the following example: ``` from transformers import pipeline sentiment_analysis = pipeline("sentiment-analysis",model="siebert/sentiment-roberta-large-english") print(sentiment_analysis("I love this!")) ``` [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/chrsiebert/sentiment-roberta-large-english/blob/main/sentiment_roberta_pipeline.ipynb) # Use for further fine-tuning The model can also be used as a starting point for further fine-tuning of RoBERTa on your specific data. Please refer to Hugging Face's [documentation](https://huggingface.co/docs/transformers/training) for further details and example code. # Performance To evaluate the performance of our general-purpose sentiment analysis model, we set aside an evaluation set from each data set, which was not used for training. On average, our model outperforms a [DistilBERT-based model](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) (which is solely fine-tuned on the popular SST-2 data set) by more than 15 percentage points (78.1 vs. 93.2 percent, see table below). As a robustness check, we evaluate the model in a leave-one-out manner (training on 14 data sets, evaluating on the one left out), which decreases model performance by only about 3 percentage points on average and underscores its generalizability. Model performance is given as evaluation set accuracy in percent. 
|Dataset|DistilBERT SST-2|This model| |---|---|---| |McAuley and Leskovec (2013) (Reviews)|84.7|98.0| |McAuley and Leskovec (2013) (Review Titles)|65.5|87.0| |Yelp Academic Dataset|84.8|96.5| |Maas et al. (2011)|80.6|96.0| |Kaggle|87.2|96.0| |Pang and Lee (2005)|89.7|91.0| |Nakov et al. (2013)|70.1|88.5| |Shamma (2009)|76.0|87.0| |Blitzer et al. (2007) (Books)|83.0|92.5| |Blitzer et al. (2007) (DVDs)|84.5|92.5| |Blitzer et al. (2007) (Electronics)|74.5|95.0| |Blitzer et al. (2007) (Kitchen devices)|80.0|98.5| |Pang et al. (2002)|73.5|95.5| |Speriosu et al. (2011)|71.5|85.5| |Hartmann et al. (2019)|65.5|98.0| |**Average**|**78.1**|**93.2**| # Fine-tuning hyperparameters - learning_rate = 2e-5 - num_train_epochs = 3.0 - warmup_steps = 500 - weight_decay = 0.01 Other values were left at their defaults as listed [here](https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments). # Citation and contact Please cite [this paper](https://www.sciencedirect.com/science/article/pii/S0167811622000477) (Published in the [IJRM](https://www.journals.elsevier.com/international-journal-of-research-in-marketing)) when you use our model. Feel free to reach out to [christian.siebert@uni-hamburg.de](mailto:christian.siebert@uni-hamburg.de) with any questions or feedback you may have. ``` @article{hartmann2023, title = {More than a Feeling: Accuracy and Application of Sentiment Analysis}, journal = {International Journal of Research in Marketing}, volume = {40}, number = {1}, pages = {75-87}, year = {2023}, doi = {https://doi.org/10.1016/j.ijresmar.2022.05.005}, url = {https://www.sciencedirect.com/science/article/pii/S0167811622000477}, author = {Jochen Hartmann and Mark Heitmann and Christian Siebert and Christina Schamp}, } ```
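As a hedged illustration of how the fine-tuning hyperparameters listed above map onto the `transformers` Trainer API, one could configure `TrainingArguments` as follows; the output directory is a placeholder and the dataset/model setup is omitted.

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./sentiment-roberta-large-english",  # placeholder path
    learning_rate=2e-5,
    num_train_epochs=3.0,
    warmup_steps=500,
    weight_decay=0.01,
)
print(training_args.learning_rate, training_args.warmup_steps)
```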
naver/splade-cocondenser-ensembledistil
naver
"2022-05-11T08:05:37Z"
359,601
27
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "splade", "query-expansion", "document-expansion", "bag-of-words", "passage-retrieval", "knowledge-distillation", "en", "dataset:ms_marco", "arxiv:2205.04733", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-05-09T13:18:41Z"
--- license: cc-by-nc-sa-4.0 language: "en" tags: - splade - query-expansion - document-expansion - bag-of-words - passage-retrieval - knowledge-distillation datasets: - ms_marco --- ## SPLADE CoCondenser EnsembleDistil SPLADE model for passage retrieval. For additional details, please visit: * paper: https://arxiv.org/abs/2205.04733 * code: https://github.com/naver/splade | | MRR@10 (MS MARCO dev) | R@1000 (MS MARCO dev) | | --- | --- | --- | | `splade-cocondenser-ensembledistil` | 38.3 | 98.3 | ## Citation If you use our checkpoint, please cite our work: ``` @misc{https://doi.org/10.48550/arxiv.2205.04733, doi = {10.48550/ARXIV.2205.04733}, url = {https://arxiv.org/abs/2205.04733}, author = {Formal, Thibault and Lassance, Carlos and Piwowarski, Benjamin and Clinchant, Stéphane}, keywords = {Information Retrieval (cs.IR), Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {From Distillation to Hard Negative Sampling: Making Sparse Neural IR Models More Effective}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International} } ```
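The card above does not include usage code. Below is a hedged sketch of computing a sparse representation with the usual SPLADE aggregation (log-saturated ReLU of the MLM logits, max-pooled over the sequence); please refer to the official repository linked above for the reference implementation.

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

model_id = "naver/splade-cocondenser-ensembledistil"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

tokens = tokenizer("what causes aging fast", return_tensors="pt")
with torch.no_grad():
    logits = model(**tokens).logits  # (1, seq_len, vocab_size)

# SPLADE-style aggregation: log(1 + ReLU(logits)), masked, max-pooled over tokens
weights = torch.log1p(torch.relu(logits)) * tokens["attention_mask"].unsqueeze(-1)
sparse_rep = weights.max(dim=1).values.squeeze(0)  # (vocab_size,) bag-of-words vector

# inspect the highest-weighted (expanded) terms
top_vals, top_ids = torch.topk(sparse_rep, k=10)
for v, i in zip(top_vals, top_ids):
    print(tokenizer.convert_ids_to_tokens(int(i)), round(float(v), 2))
```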
timm/vit_base_patch16_224.augreg_in21k
timm
"2023-05-06T00:00:35Z"
358,708
7
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-21k", "arxiv:2106.10270", "arxiv:2010.11929", "license:apache-2.0", "region:us" ]
image-classification
"2022-12-22T07:25:23Z"
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-21k --- # Model card for vit_base_patch16_224.augreg_in21k A Vision Transformer (ViT) image classification model. Trained on ImageNet-21k (with additional augmentation and regularization) in JAX by paper authors, ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 102.6 - GMACs: 16.9 - Activations (M): 16.5 - Image size: 224 x 224 - **Papers:** - How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers: https://arxiv.org/abs/2106.10270 - An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale: https://arxiv.org/abs/2010.11929v2 - **Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/vision_transformer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vit_base_patch16_224.augreg_in21k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vit_base_patch16_224.augreg_in21k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 197, 768) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{steiner2021augreg, title={How to train your ViT?
Data, Augmentation, and Regularization in Vision Transformers}, author={Steiner, Andreas and Kolesnikov, Alexander and and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob and Beyer, Lucas}, journal={arXiv preprint arXiv:2106.10270}, year={2021} } ``` ```bibtex @article{dosovitskiy2020vit, title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}, journal={ICLR}, year={2021} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
nlpaueb/legal-bert-base-uncased
nlpaueb
"2022-04-28T14:42:50Z"
358,350
120
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "pretraining", "legal", "fill-mask", "en", "license:cc-by-sa-4.0", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
--- language: en pipeline_tag: fill-mask license: cc-by-sa-4.0 thumbnail: https://i.ibb.co/p3kQ7Rw/Screenshot-2020-10-06-at-12-16-36-PM.png tags: - legal widget: - text: "The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of police." --- # LEGAL-BERT: The Muppets straight out of Law School <img align="left" src="https://i.ibb.co/p3kQ7Rw/Screenshot-2020-10-06-at-12-16-36-PM.png" width="100"/> LEGAL-BERT is a family of BERT models for the legal domain, intended to assist legal NLP research, computational law, and legal technology applications. To pre-train the different variations of LEGAL-BERT, we collected 12 GB of diverse English legal text from several fields (e.g., legislation, court cases, contracts) scraped from publicly available resources. Sub-domain variants (CONTRACTS-, EURLEX-, ECHR-) and/or general LEGAL-BERT perform better than using BERT out of the box for domain-specific tasks. A light-weight model (33% the size of BERT-BASE) pre-trained from scratch on legal data with competitive performance is also available. <br/><br/> --- I. Chalkidis, M. Fergadiotis, P. Malakasiotis, N. Aletras and I. Androutsopoulos. "LEGAL-BERT: The Muppets straight out of Law School". In Findings of Empirical Methods in Natural Language Processing (EMNLP 2020) (Short Papers), to be held online, 2020. (https://aclanthology.org/2020.findings-emnlp.261) --- ## Pre-training corpora The pre-training corpora of LEGAL-BERT include: * 116,062 documents of EU legislation, publicly available from EURLEX (http://eur-lex.europa.eu), the repository of EU Law running under the EU Publication Office. * 61,826 documents of UK legislation, publicly available from the UK legislation portal (http://www.legislation.gov.uk). * 19,867 cases from the European Court of Justice (ECJ), also available from EURLEX. * 12,554 cases from HUDOC, the repository of the European Court of Human Rights (ECHR) (http://hudoc.echr.coe.int/eng). * 164,141 cases from various courts across the USA, hosted in the Case Law Access Project portal (https://case.law). * 76,366 US contracts from EDGAR, the database of US Securities and Exchange Commission (SECOM) (https://www.sec.gov/edgar.shtml). ## Pre-training details * We trained BERT using the official code provided in Google BERT's GitHub repository (https://github.com/google-research/bert). * We released a model similar to the English BERT-BASE model (12-layer, 768-hidden, 12-heads, 110M parameters). * We chose to follow the same training set-up: 1 million training steps with batches of 256 sequences of length 512 with an initial learning rate 1e-4. * We were able to use a single Google Cloud TPU v3-8 provided for free from [TensorFlow Research Cloud (TFRC)](https://www.tensorflow.org/tfrc), while also utilizing [GCP research credits](https://edu.google.com/programs/credits/research). Huge thanks to both Google programs for supporting us! * Part of LEGAL-BERT is a light-weight model pre-trained from scratch on legal data, which achieves comparable performance to larger models, while being much more efficient (approximately 4 times faster) with a smaller environmental footprint. 
## Models list | Model name | Model Path | Training corpora | | ------------------- | ------------------------------------ | ------------------- | | CONTRACTS-BERT-BASE | `nlpaueb/bert-base-uncased-contracts` | US contracts | | EURLEX-BERT-BASE | `nlpaueb/bert-base-uncased-eurlex` | EU legislation | | ECHR-BERT-BASE | `nlpaueb/bert-base-uncased-echr` | ECHR cases | | LEGAL-BERT-BASE * | `nlpaueb/legal-bert-base-uncased` | All | | LEGAL-BERT-SMALL | `nlpaueb/legal-bert-small-uncased` | All | \* LEGAL-BERT-BASE is the model referred to as LEGAL-BERT-SC in Chalkidis et al. (2020); a model trained from scratch in the legal corpora mentioned below using a newly created vocabulary by a sentence-piece tokenizer trained on the very same corpora. \*\* As many of you expressed interest in the LEGAL-BERT-FP models (those relying on the original BERT-BASE checkpoint), they have been released in Archive.org (https://archive.org/details/legal_bert_fp), as these models are secondary and possibly only interesting for those who aim to dig deeper in the open questions of Chalkidis et al. (2020). ## Load Pretrained Model ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("nlpaueb/legal-bert-base-uncased") model = AutoModel.from_pretrained("nlpaueb/legal-bert-base-uncased") ``` ## Use LEGAL-BERT variants as Language Models | Corpus | Model | Masked token | Predictions | | --------------------------------- | ---------------------------------- | ------------ | ------------ | | | **BERT-BASE-UNCASED** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('new', '0.09'), ('current', '0.04'), ('proposed', '0.03'), ('marketing', '0.03'), ('joint', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.32'), ('rape', '0.22'), ('abuse', '0.14'), ('death', '0.04'), ('violence', '0.03') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('farm', '0.25'), ('livestock', '0.08'), ('draft', '0.06'), ('domestic', '0.05'), ('wild', '0.05') | | **CONTRACTS-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('letter', '0.38'), ('dealer', '0.04'), ('employment', '0.03'), ('award', '0.03'), ('contribution', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('death', '0.39'), ('imprisonment', '0.07'), ('contempt', '0.05'), ('being', '0.03'), ('crime', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | (('domestic', '0.18'), ('laboratory', '0.07'), ('household', '0.06'), ('personal', '0.06'), ('the', '0.04') | | **EURLEX-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . 
| employment | ('supply', '0.11'), ('cooperation', '0.08'), ('service', '0.07'), ('licence', '0.07'), ('distribution', '0.05') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.66'), ('death', '0.07'), ('imprisonment', '0.07'), ('murder', '0.04'), ('rape', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('live', '0.43'), ('pet', '0.28'), ('certain', '0.05'), ('fur', '0.03'), ('the', '0.02') | | **ECHR-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('second', '0.24'), ('latter', '0.10'), ('draft', '0.05'), ('bilateral', '0.05'), ('arbitration', '0.04') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.99'), ('death', '0.01'), ('inhuman', '0.00'), ('beating', '0.00'), ('rape', '0.00') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('pet', '0.17'), ('all', '0.12'), ('slaughtered', '0.10'), ('domestic', '0.07'), ('individual', '0.05') | | **LEGAL-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('settlement', '0.26'), ('letter', '0.23'), ('dealer', '0.04'), ('master', '0.02'), ('supplemental', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '1.00'), ('detention', '0.00'), ('arrest', '0.00'), ('rape', '0.00'), ('death', '0.00') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('live', '0.67'), ('beef', '0.17'), ('farm', '0.03'), ('pet', '0.02'), ('dairy', '0.01') | | **LEGAL-BERT-SMALL** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('license', '0.09'), ('transition', '0.08'), ('settlement', '0.04'), ('consent', '0.03'), ('letter', '0.03') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.59'), ('pain', '0.05'), ('ptsd', '0.05'), ('death', '0.02'), ('tuberculosis', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('all', '0.08'), ('live', '0.07'), ('certain', '0.07'), ('the', '0.07'), ('farm', '0.05') ## Evaluation on downstream tasks Consider the experiments in the article "LEGAL-BERT: The Muppets straight out of Law School". 
Chalkidis et al., 2020, (https://aclanthology.org/2020.findings-emnlp.261) ## Author - Publication ``` @inproceedings{chalkidis-etal-2020-legal, title = "{LEGAL}-{BERT}: The Muppets straight out of Law School", author = "Chalkidis, Ilias and Fergadiotis, Manos and Malakasiotis, Prodromos and Aletras, Nikolaos and Androutsopoulos, Ion", booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", doi = "10.18653/v1/2020.findings-emnlp.261", pages = "2898--2904" } ``` ## About Us [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) develops algorithms, models, and systems that allow computers to process and generate natural language texts. The group's current research interests include: * question answering systems for databases, ontologies, document collections, and the Web, especially biomedical question answering, * natural language generation from databases and ontologies, especially Semantic Web ontologies, * text classification, including filtering spam and abusive content, * information extraction and opinion mining, including legal text analytics and sentiment analysis, * natural language processing tools for Greek, for example parsers and named-entity recognizers, * machine learning in natural language processing, especially deep learning. The group is part of the Information Processing Laboratory of the Department of Informatics of the Athens University of Economics and Business. [Ilias Chalkidis](https://iliaschalkidis.github.io) on behalf of [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) | Github: [@ilias.chalkidis](https://github.com/iliaschalkidis) | Twitter: [@KiddoThe2B](https://twitter.com/KiddoThe2B) |
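Predictions like the ones in the language-model table above can be reproduced with the standard fill-mask pipeline; this is a hedged sketch, and exact scores may differ slightly depending on library versions.

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="nlpaueb/legal-bert-base-uncased")
sentence = ("The applicant submitted that her husband was subjected to treatment "
            "amounting to [MASK] whilst in the custody of Adana Security Directorate.")
for pred in fill_mask(sentence):
    print(pred["token_str"], round(pred["score"], 2))
```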
facebook/detr-resnet-101
facebook
"2023-12-14T17:21:17Z"
356,810
84
transformers
[ "transformers", "pytorch", "safetensors", "detr", "object-detection", "vision", "dataset:coco", "arxiv:2005.12872", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
object-detection
"2022-03-02T23:29:05Z"
--- license: apache-2.0 tags: - object-detection - vision datasets: - coco widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg example_title: Savanna - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg example_title: Football Match - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg example_title: Airport --- # DETR (End-to-End Object Detection) model with ResNet-101 backbone DEtection TRansformer (DETR) model trained end-to-end on COCO 2017 object detection (118k annotated images). It was introduced in the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Carion et al. and first released in [this repository](https://github.com/facebookresearch/detr). Disclaimer: The team releasing DETR did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The DETR model is an encoder-decoder transformer with a convolutional backbone. Two heads are added on top of the decoder outputs in order to perform object detection: a linear layer for the class labels and a MLP (multi-layer perceptron) for the bounding boxes. The model uses so-called object queries to detect objects in an image. Each object query looks for a particular object in the image. For COCO, the number of object queries is set to 100. The model is trained using a "bipartite matching loss": one compares the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a "no object" as class and "no bounding box" as bounding box). The Hungarian matching algorithm is used to create an optimal one-to-one mapping between each of the N queries and each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and generalized IoU loss (for the bounding boxes) are used to optimize the parameters of the model. ![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/detr_architecture.png) ## Intended uses & limitations You can use the raw model for object detection. See the [model hub](https://huggingface.co/models?search=facebook/detr) to look for all available DETR models. 
### How to use Here is how to use this model: ```python from transformers import DetrImageProcessor, DetrForObjectDetection import torch from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) # you can specify the revision tag if you don't want the timm dependency processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-101", revision="no_timm") model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-101", revision="no_timm") inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) # convert outputs (bounding boxes and class logits) to COCO API # let's only keep detections with score > 0.9 target_sizes = torch.tensor([image.size[::-1]]) results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0] for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): box = [round(i, 2) for i in box.tolist()] print( f"Detected {model.config.id2label[label.item()]} with confidence " f"{round(score.item(), 3)} at location {box}" ) ``` This should output (something along the lines of): ``` Detected cat with confidence 0.998 at location [344.06, 24.85, 640.34, 373.74] Detected remote with confidence 0.997 at location [328.13, 75.93, 372.81, 187.66] Detected remote with confidence 0.997 at location [39.34, 70.13, 175.56, 118.78] Detected cat with confidence 0.998 at location [15.36, 51.75, 316.89, 471.16] Detected couch with confidence 0.995 at location [-0.19, 0.71, 639.73, 474.17] ``` Currently, both the feature extractor and model support PyTorch. ## Training data The DETR model was trained on [COCO 2017 object detection](https://cocodataset.org/#download), a dataset consisting of 118k/5k annotated images for training/validation respectively. ## Training procedure ### Preprocessing The exact details of preprocessing of images during training/validation can be found [here](https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py). Images are resized/rescaled such that the shortest side is at least 800 pixels and the largest side at most 1333 pixels, and normalized across the RGB channels with the ImageNet mean (0.485, 0.456, 0.406) and standard deviation (0.229, 0.224, 0.225). ### Training The model was trained for 300 epochs on 16 V100 GPUs. This takes 3 days, with 4 images per GPU (hence a total batch size of 64). ## Evaluation results This model achieves an AP (average precision) of **43.5** on COCO 2017 validation. For more details regarding evaluation results, we refer to table 1 of the original paper. ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2005-12872, author = {Nicolas Carion and Francisco Massa and Gabriel Synnaeve and Nicolas Usunier and Alexander Kirillov and Sergey Zagoruyko}, title = {End-to-End Object Detection with Transformers}, journal = {CoRR}, volume = {abs/2005.12872}, year = {2020}, url = {https://arxiv.org/abs/2005.12872}, archivePrefix = {arXiv}, eprint = {2005.12872}, timestamp = {Thu, 28 May 2020 17:38:09 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2005-12872.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
oliverguhr/german-sentiment-bert
oliverguhr
"2023-03-16T18:09:30Z"
355,911
46
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "bert", "text-classification", "sentiment", "de", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: - de tags: - sentiment - bert license: mit widget: - text: "Das ist gar nicht mal so schlecht" metrics: - f1 --- # German Sentiment Classification with Bert This model was trained for sentiment classification of German-language texts. To achieve the best results, all model inputs need to be preprocessed with the same procedure that was applied during training. To simplify the usage of the model, we provide a Python package that bundles the code needed for preprocessing and inference. The model uses Google's BERT architecture and was trained on 1.834 million German-language samples. The training data contains texts from various domains like Twitter, Facebook and movie, app and hotel reviews. You can find more information about the dataset and the training process in the [paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.202.pdf). ## Using the Python package To get started, install the package from [pypi](https://pypi.org/project/germansentiment/): ```bash pip install germansentiment ``` ```python from germansentiment import SentimentModel model = SentimentModel() texts = [ "Mit keinem guten Ergebniss","Das ist gar nicht mal so gut", "Total awesome!","nicht so schlecht wie erwartet", "Der Test verlief positiv.","Sie fährt ein grünes Auto."] result = model.predict_sentiment(texts) print(result) ``` The code above will output the following list: ```python ["negative","negative","positive","positive","neutral", "neutral"] ``` ### Output class probabilities ```python from germansentiment import SentimentModel model = SentimentModel() classes, probabilities = model.predict_sentiment(["das ist super"], output_probabilities = True) print(classes, probabilities) ``` ```python ['positive'] [[['positive', 0.9761366844177246], ['negative', 0.023540444672107697], ['neutral', 0.00032294404809363186]]] ``` ## Model and Data If you are interested in the code and data that were used to train this model, please have a look at [this repository](https://github.com/oliverguhr/german-sentiment) and our [paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.202.pdf). Here is a table of the F1 scores that this model achieves on different datasets. Since we trained this model with a newer version of the transformers library, the results are slightly better than reported in the paper. | Dataset | F1 micro Score | | :----------------------------------------------------------- | -------------: | | [holidaycheck](https://github.com/oliverguhr/german-sentiment) | 0.9568 | | [scare](https://www.romanklinger.de/scare/) | 0.9418 | | [filmstarts](https://github.com/oliverguhr/german-sentiment) | 0.9021 | | [germeval](https://sites.google.com/view/germeval2017-absa/home) | 0.7536 | | [PotTS](https://www.aclweb.org/anthology/L16-1181/) | 0.6780 | | [emotions](https://github.com/oliverguhr/german-sentiment) | 0.9649 | | [sb10k](https://www.spinningbytes.com/resources/germansentiment/) | 0.7376 | | [Leipzig Wikipedia Corpus 2016](https://wortschatz.uni-leipzig.de/de/download/german) | 0.9967 | | all | 0.9639 | ## Cite For feedback and questions contact me via mail or Twitter [@oliverguhr](https://twitter.com/oliverguhr).
Please cite us if you found this useful: ``` @InProceedings{guhr-EtAl:2020:LREC, author = {Guhr, Oliver and Schumann, Anne-Kathrin and Bahrmann, Frank and Böhme, Hans Joachim}, title = {Training a Broad-Coverage German Sentiment Classification Model for Dialog Systems}, booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference}, month = {May}, year = {2020}, address = {Marseille, France}, publisher = {European Language Resources Association}, pages = {1620--1625}, url = {https://www.aclweb.org/anthology/2020.lrec-1.202} } ```
stabilityai/sdxl-turbo
stabilityai
"2024-04-12T08:48:24Z"
353,624
1,933
diffusers
[ "diffusers", "onnx", "safetensors", "text-to-image", "license:other", "has_space", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2023-11-27T15:19:11Z"
--- pipeline_tag: text-to-image inference: false license: other license_name: sai-nc-community license_link: https://huggingface.co/stabilityai/sdxl-turbo/blob/main/LICENSE.TXT --- # SDXL-Turbo Model Card <!-- Provide a quick summary of what the model is/does. --> ![row01](output_tile.jpg) SDXL-Turbo is a fast generative text-to-image model that can synthesize photorealistic images from a text prompt in a single network evaluation. A real-time demo is available here: http://clipdrop.co/stable-diffusion-turbo Please note: For commercial use, please refer to https://stability.ai/membership. ## Model Details ### Model Description SDXL-Turbo is a distilled version of [SDXL 1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), trained for real-time synthesis. SDXL-Turbo is based on a novel training method called Adversarial Diffusion Distillation (ADD) (see the [technical report](https://stability.ai/research/adversarial-diffusion-distillation)), which allows sampling large-scale foundational image diffusion models in 1 to 4 steps at high image quality. This approach uses score distillation to leverage large-scale off-the-shelf image diffusion models as a teacher signal and combines this with an adversarial loss to ensure high image fidelity even in the low-step regime of one or two sampling steps. - **Developed by:** Stability AI - **Funded by:** Stability AI - **Model type:** Generative text-to-image model - **Finetuned from model:** [SDXL 1.0 Base](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) ### Model Sources For research purposes, we recommend our `generative-models` Github repository (https://github.com/Stability-AI/generative-models), which implements the most popular diffusion frameworks (both training and inference). - **Repository:** https://github.com/Stability-AI/generative-models - **Paper:** https://stability.ai/research/adversarial-diffusion-distillation - **Demo:** http://clipdrop.co/stable-diffusion-turbo ## Evaluation ![comparison1](image_quality_one_step.png) ![comparison2](prompt_alignment_one_step.png) The charts above evaluate user preference for SDXL-Turbo over other single- and multi-step models. SDXL-Turbo evaluated at a single step is preferred by human voters in terms of image quality and prompt following over LCM-XL evaluated at four (or fewer) steps. In addition, we see that using four steps for SDXL-Turbo further improves performance. For details on the user study, we refer to the [research paper](https://stability.ai/research/adversarial-diffusion-distillation). ## Uses ### Direct Use The model is intended for both non-commercial and commercial usage. You can use this model for non-commercial or research purposes under this [license](https://huggingface.co/stabilityai/sdxl-turbo/blob/main/LICENSE.TXT). Possible research areas and tasks include - Research on generative models. - Research on real-time applications of generative models. - Research on the impact of real-time generative models. - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. For commercial use, please refer to https://stability.ai/membership. Excluded uses are described below. 
### Diffusers

```
pip install diffusers transformers accelerate --upgrade
```

- **Text-to-image**: SDXL-Turbo does not make use of `guidance_scale` or `negative_prompt`, so we disable them with `guidance_scale=0.0`. The model works best at an image size of 512x512, but higher image sizes work as well. A **single step** is enough to generate high quality images.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipe.to("cuda")

prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."

image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
```

- **Image-to-image**: When using SDXL-Turbo for image-to-image generation, make sure that `num_inference_steps` * `strength` is greater than or equal to 1. The image-to-image pipeline will run for `int(num_inference_steps * strength)` steps, *e.g.* 2 * 0.5 = 1 step in the example below.

```py
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image
import torch

pipe = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipe.to("cuda")

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png").resize((512, 512))

prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"

image = pipe(prompt, image=init_image, num_inference_steps=2, strength=0.5, guidance_scale=0.0).images[0]
```

### Out-of-Scope Use

The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
The model should not be used in any way that violates Stability AI's [Acceptable Use Policy](https://stability.ai/use-policy).

## Limitations and Bias

### Limitations

- The generated images are of a fixed resolution (512x512 pix), and the model does not achieve perfect photorealism.
- The model cannot render legible text.
- Faces and people in general may not be generated properly.
- The autoencoding part of the model is lossy.

### Recommendations

The model is intended for both non-commercial and commercial usage.

## How to Get Started with the Model

Check out https://github.com/Stability-AI/generative-models
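Since the evaluation above notes that four steps further improve quality, here is a minimal sketch of multi-step sampling; it reuses the text-to-image pipeline from the example above, with only `num_inference_steps` changed (guidance stays disabled for SDXL-Turbo).

```py
# Minimal sketch: same text-to-image pipeline as above, sampled with four
# steps instead of one; guidance remains disabled for SDXL-Turbo.
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipe.to("cuda")

prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."

image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0.0).images[0]
image.save("sdxl_turbo_4_steps.png")
```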
Joe99/FinetuneBERTClsFAQ
Joe99
"2023-01-13T10:29:58Z"
349,301
0
transformers
[ "transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2023-01-13T10:28:12Z"
Entry not found
openai-community/gpt2-medium
openai-community
"2024-02-19T12:39:04Z"
347,632
118
transformers
[ "transformers", "pytorch", "tf", "jax", "rust", "onnx", "safetensors", "gpt2", "text-generation", "en", "arxiv:1910.09700", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2022-03-02T23:29:04Z"
--- language: en license: mit --- # GPT-2 Medium ## Model Details **Model Description:** GPT-2 Medium is the **355M parameter** version of GPT-2, a transformer-based language model created and released by OpenAI. The model is a pretrained model on English language using a causal language modeling (CLM) objective. - **Developed by:** OpenAI, see [associated research paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and [GitHub repo](https://github.com/openai/gpt-2) for model developers. - **Model Type:** Transformer-based language model - **Language(s):** English - **License:** [Modified MIT License](https://github.com/openai/gpt-2/blob/master/LICENSE) - **Related Models:** [GPT2](https://huggingface.co/gpt2), [GPT2-Large](https://huggingface.co/gpt2-large) and [GPT2-XL](https://huggingface.co/gpt2-xl) - **Resources for more information:** - [Research Paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) - [OpenAI Blog Post](https://openai.com/blog/better-language-models/) - [GitHub Repo](https://github.com/openai/gpt-2) - [OpenAI Model Card for GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md) - Test the full generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large ## How to Get Started with the Model Use the code below to get started with the model. You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2-medium') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, I'm a language. I'm a compiler, I'm a parser, I'm a server process. I"}, {'generated_text': "Hello, I'm a language model, and I'd like to join an existing team. What can I do to get started?\n\nI'd"}, {'generated_text': "Hello, I'm a language model, why does my code get created? Can't I just copy it? But why did my code get created when"}, {'generated_text': "Hello, I'm a language model, a functional language...\n\nI'm a functional language. Is it hard? A little, yes. But"}, {'generated_text': "Hello, I'm a language model, not an object model.\n\nIn a nutshell, I need to give me objects from which I can get"}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium') model = GPT2Model.from_pretrained('gpt2-medium') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium') model = TFGPT2Model.from_pretrained('gpt2-medium') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ## Uses #### Direct Use In their [model card about GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md), OpenAI wrote: > The primary intended users of these models are AI researchers and practitioners. 
>
> We primarily imagine these language models will be used by researchers to better understand the behaviors, capabilities, biases, and constraints of large-scale generative language models.

#### Downstream Use

In their [model card about GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md), OpenAI wrote:

> Here are some secondary use cases we believe are likely:
>
> - Writing assistance: Grammar assistance, autocompletion (for normal prose or code)
> - Creative writing and art: exploring the generation of creative, fictional texts; aiding creation of poetry and other literary art.
> - Entertainment: Creation of games, chat bots, and amusing generations.

#### Misuse and Out-of-scope Use

In their [model card about GPT-2](https://github.com/openai/gpt-2/blob/master/model_card.md), OpenAI wrote:

> Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases that require the generated text to be true.
>
> Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar levels of caution around use cases that are sensitive to biases around human attributes.

## Risks, Limitations and Biases

**CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.**

Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)).

The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. Predictions generated by the model can include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. For example:

```python
>>> from transformers import pipeline, set_seed
>>> generator = pipeline('text-generation', model='gpt2-medium')

>>> set_seed(42)
>>> generator("The man worked as a", max_length=10, num_return_sequences=5)

[{'generated_text': 'The man worked as a security guard in a military'},
 {'generated_text': 'The man worked as a salesman in Mexico and eventually'},
 {'generated_text': 'The man worked as a supervisor at the department for'},
 {'generated_text': 'The man worked as a cleaner for the same corporation'},
 {'generated_text': 'The man worked as a barman and was involved'}]

>>> set_seed(42)
>>> generator("The woman worked as a", max_length=10, num_return_sequences=5)

[{'generated_text': 'The woman worked as a social worker in a children'},
 {'generated_text': 'The woman worked as a marketing manager, and her'},
 {'generated_text': 'The woman worked as a customer service agent in a'},
 {'generated_text': 'The woman worked as a cleaner for the same corporation'},
 {'generated_text': 'The woman worked as a barista and was involved'}]
```

This bias will also affect all fine-tuned versions of this model.
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model.

## Training

#### Training Data

The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weighs 40GB of texts but has not been publicly released. You can find a list of the top 1,000 domains present in WebText [here](https://github.com/openai/gpt-2/blob/master/domains.txt).

#### Training Procedure

The model is pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences: inputs are sequences of continuous text of a certain length, and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model internally uses a masking mechanism to make sure the predictions for the token `i` only use the inputs from `1` to `i` and not the future tokens.

This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks.

The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens.

## Evaluation

The following evaluation information is extracted from the [associated paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf).

#### Testing Data, Factors and Metrics

The model authors write in the [associated paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) that:

> Since our model operates on a byte level and does not require lossy pre-processing or tokenization, we can evaluate it on any language model benchmark. Results on language modeling datasets are commonly reported in a quantity which is a scaled or exponentiated version of the average negative log probability per canonical prediction unit - usually a character, a byte, or a word. We evaluate the same quantity by computing the log-probability of a dataset according to a WebText LM and dividing by the number of canonical units. For many of these datasets, WebText LMs would be tested significantly out-of-distribution, having to predict aggressively standardized text, tokenization artifacts such as disconnected punctuation and contractions, shuffled sentences, and even the string <UNK> which is extremely rare in WebText - occurring only 26 times in 40 billion bytes. We report our main results...using invertible de-tokenizers which remove as many of these tokenization / pre-processing artifacts as possible. Since these de-tokenizers are invertible, we can still calculate the log probability of a dataset and they can be thought of as a simple form of domain adaptation.
#### Results The model achieves the following results without any fine-tuning (zero-shot): | Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW | |:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:| | (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) | | | 15.60 | 55.48 | 92.35 | 87.1 | 22.76 | 47.33 | 1.01 | 1.06 | 26.37 | 55.72 | ## Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Unknown - **Hours used:** Unknown - **Cloud Provider:** Unknown - **Compute Region:** Unknown - **Carbon Emitted:** Unknown ## Technical Specifications See the [associated paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) for details on the modeling architecture, objective, compute infrastructure, and training details. ## Citation Information ```bibtex @article{radford2019language, title={Language models are unsupervised multitask learners}, author={Radford, Alec and Wu, Jeffrey and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya and others}, journal={OpenAI blog}, volume={1}, number={8}, pages={9}, year={2019} } ``` ## Model Card Authors This model card was written by the Hugging Face team.
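To make the perplexity (PPL) numbers in the table above more concrete, here is a minimal sketch of computing perplexity with `transformers`; it is illustrative only and does not reproduce the paper's detokenized, dataset-specific evaluation protocol, so its values are not comparable to the table.

```python
# Illustrative perplexity sketch (not the paper's exact evaluation setup).
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2-medium")
model = GPT2LMHeadModel.from_pretrained("gpt2-medium")
model.eval()

text = "Replace me by any text you'd like."
encodings = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    # Passing labels returns the mean cross-entropy over next-token predictions.
    outputs = model(**encodings, labels=encodings["input_ids"])

print(f"Perplexity: {torch.exp(outputs.loss).item():.2f}")
```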
kykim/bert-kor-base
kykim
"2021-05-19T21:17:13Z"
346,702
17
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "fill-mask", "ko", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: ko
---

# Bert base model for Korean

* A 70GB Korean text dataset and a vocabulary of 42,000 lower-cased subwords are used.
* Check the model performance and other Korean language models on [GitHub](https://github.com/kiyoungkim1/LM-kor).

```python
from transformers import BertTokenizerFast, BertModel

tokenizer_bert = BertTokenizerFast.from_pretrained("kykim/bert-kor-base")
model_bert = BertModel.from_pretrained("kykim/bert-kor-base")
```
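Since the checkpoint is published for the fill-mask task, a masked-prediction sketch may also be useful. The example sentence is an assumption for illustration, and the outputs depend on the model's Korean vocabulary.

```python
# Minimal fill-mask sketch (the example sentence is illustrative only; the
# tokenizer's default [MASK] token is assumed).
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="kykim/bert-kor-base")
print(fill_mask("대한민국의 수도는 [MASK]이다."))
```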
google/gemma-7b-it
google
"2024-04-11T17:59:07Z"
346,578
1,043
transformers
[ "transformers", "safetensors", "gguf", "gemma", "text-generation", "conversational", "arxiv:2312.11805", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2107.03374", "arxiv:2108.07732", "arxiv:2110.14168", "arxiv:2304.06364", "arxiv:2206.04615", "arxiv:1804.06876", "arxiv:2110.08193", "arxiv:2009.11462", "arxiv:2101.11718", "arxiv:1804.09301", "arxiv:2109.07958", "arxiv:2203.09509", "license:gemma", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2024-02-13T01:07:30Z"
--- library_name: transformers tags: [] widget: - messages: - role: user content: How does the brain work? inference: parameters: max_new_tokens: 200 extra_gated_heading: Access Gemma on Hugging Face extra_gated_prompt: >- To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license license: gemma --- # Gemma Model Card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs) This model card corresponds to the 7B instruct version of the Gemma model. You can also visit the model card of the [2B base model](https://huggingface.co/google/gemma-2b), [7B base model](https://huggingface.co/google/gemma-7b), and [2B instruct model](https://huggingface.co/google/gemma-2b-it). **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) * [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma) * [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335?version=gemma-7b-it-gg-hf) **Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent) **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your usecase. #### Fine-tuning the model You can find fine-tuning scripts and notebook under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples) of [`google/gemma-7b`](https://huggingface.co/google/gemma-7b) repository. To adapt it to this model, simply change the model-id to `google/gemma-7b-it`. In that repository, we provide: * A script to perform Supervised Fine-Tuning (SFT) on UltraChat dataset using QLoRA * A script to perform SFT using FSDP on TPU devices * A notebook that you can run on a free-tier Google Colab instance to perform SFT on English quotes dataset #### Running the model on a CPU As explained below, we recommend `torch.bfloat16` as the default dtype. You can use [a different precision](#precisions) if necessary. ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-7b-it", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-7b-it", device_map="auto", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` <a name="precisions"></a> #### Running the model on a GPU using different precisions The native weights of this model were exported in `bfloat16` precision. You can use `float16`, which may be faster on certain hardware, indicating the `torch_dtype` when loading the model. For convenience, the `float16` revision of the repo contains a copy of the weights already converted to that precision. You can also use `float32` if you skip the dtype, but no precision increase will occur (model weights will just be upcasted to `float32`). See examples below. * _Using `torch.float16`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-7b-it", device_map="auto", torch_dtype=torch.float16, revision="float16", ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using `torch.bfloat16`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it", device_map="auto", torch_dtype=torch.bfloat16) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Upcasting to `torch.float32`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-7b-it", device_map="auto" ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Quantized Versions through `bitsandbytes` * _Using 8-bit precision (int8)_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using 4-bit precision_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Other optimizations * _Flash Attention 2_ First make sure to install `flash-attn` in your environment `pip install flash-attn` ```diff model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, + attn_implementation="flash_attention_2" ).to(0) ``` ### Chat Template The instruction-tuned models use a chat template that must be adhered to for conversational use. The easiest way to apply it is using the tokenizer's built-in chat template, as shown in the following snippet. Let's load the model and apply the chat template to a conversation. In this example, we'll start with a single user interaction: ```py from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "google/gemma-7b-it" dtype = torch.bfloat16 tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype=dtype, ) chat = [ { "role": "user", "content": "Write a hello world program" }, ] prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) ``` At this point, the prompt contains the following text: ``` <bos><start_of_turn>user Write a hello world program<end_of_turn> <start_of_turn>model ``` As you can see, each turn is preceded by a `<start_of_turn>` delimiter and then the role of the entity (either `user`, for content supplied by the user, or `model` for LLM responses). Turns finish with the `<end_of_turn>` token. You can follow this format to build the prompt manually, if you need to do it without the tokenizer's chat template. After the prompt is ready, generation can be performed like this: ```py inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150) print(tokenizer.decode(outputs[0])) ``` ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ## Model Data Data used for model training and how the data was processed. ### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources, totaling 6 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. 
* Mathematics: Training on mathematical text helps the model learn logical reasoning and symbolic representation, and to address mathematical queries.

The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats.

### Data Preprocessing

Here are the key data cleaning and filtering methods applied to the training data:

* CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content.
* Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets.
* Additional methods: Filtering based on content quality and safety in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11).

## Implementation Information

Details about the model internals.

### Hardware

Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e).

Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain:

* Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs.
* Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality.
* Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing.
* Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training.
* These advantages are aligned with [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/).

### Software

Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture).

JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models.

ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is especially suitable for [foundation models](https://ai.google/discover/foundation-models/), including large language models like these ones.

Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow."

## Evaluation

Model evaluation metrics and results.
### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 | | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot |71.4 | 81.2 | | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 | | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 49.7 | 51.8 | | [BooIQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 | | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 | | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 | | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 | | [ARC-e](https://arxiv.org/abs/1911.01547) | | 73.2 | 81.5 | | [ARC-c](https://arxiv.org/abs/1911.01547) | | 42.1 | 53.2 | | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 | | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | 12.5 | 23 | | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 | | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 | | [GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 | | [MATH](https://arxiv.org/abs/2108.07732) | 4-shot | 11.8 | 24.3 | | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 | | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 | | ------------------------------ | ------------- | ----------- | --------- | | **Average** | | **45.0** | **56.9** | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2). * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. 
| Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [RealToxicity](https://arxiv.org/abs/2009.11462) | average | 6.86 | 7.90 | | [BOLD](https://arxiv.org/abs/2101.11718) | | 45.57 | 49.08 | | [CrowS-Pairs](https://aclanthology.org/2020.emnlp-main.154/) | top-1 | 45.82 | 51.33 | | [BBQ Ambig](https://arxiv.org/abs/2110.08193v2) | 1-shot, top-1 | 62.58 | 92.54 | | [BBQ Disambig](https://arxiv.org/abs/2110.08193v2) | top-1 | 54.62 | 71.99 | | [Winogender](https://arxiv.org/abs/1804.09301) | top-1 | 51.25 | 54.17 | | [TruthfulQA](https://arxiv.org/abs/2109.07958) | | 44.84 | 31.81 | | [Winobias 1_2](https://arxiv.org/abs/1804.06876) | | 56.12 | 59.09 | | [Winobias 2_2](https://arxiv.org/abs/1804.06876) | | 91.10 | 92.23 | | [Toxigen](https://arxiv.org/abs/2203.09509) | | 29.77 | 39.59 | | ------------------------------ | ------------- | ----------- | --------- | ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations. 
### Ethical Considerations and Risks

The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following:

* Bias and Fairness
  * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, with input data pre-processing described and subsequent evaluations reported in this card.
* Misinformation and Misuse
  * LLMs can be misused to generate text that is false, misleading, or harmful.
  * Guidelines are provided for responsible use with the model; see the [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible).
* Transparency and Accountability
  * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes.
  * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem.

Risks identified and mitigations:

* Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases.
* Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases.
* Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).
* Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques.

### Benefits

At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models.

Using the benchmark evaluation metrics described in this document, these models have been shown to provide superior performance to other, comparably-sized open model alternatives.
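As a small appendix to the Chat Template section above: the documented turn format can also be assembled by hand when `apply_chat_template` is not available. The helper below is a sketch (the function name is ours), and the tokenizer's built-in chat template should be preferred in practice.

```py
# Illustrative helper that reproduces the turn format documented in the
# "Chat Template" section (<start_of_turn>/<end_of_turn> delimiters).
# The function name is ours; prefer tokenizer.apply_chat_template in practice.
def build_gemma_prompt(turns):
    # turns: list of (role, content) pairs, with role in {"user", "model"}
    prompt = "<bos>"
    for role, content in turns:
        prompt += f"<start_of_turn>{role}\n{content}<end_of_turn>\n"
    prompt += "<start_of_turn>model\n"
    return prompt

print(build_gemma_prompt([("user", "Write a hello world program")]))
```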
sentence-transformers/all-distilroberta-v1
sentence-transformers
"2024-03-27T09:45:18Z"
346,400
21
sentence-transformers
[ "sentence-transformers", "pytorch", "rust", "safetensors", "roberta", "fill-mask", "feature-extraction", "sentence-similarity", "transformers", "en", "dataset:s2orc", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:ms_marco", "dataset:gooaq", "dataset:yahoo_answers_topics", "dataset:code_search_net", "dataset:search_qa", "dataset:eli5", "dataset:snli", "dataset:multi_nli", "dataset:wikihow", "dataset:natural_questions", "dataset:trivia_qa", "dataset:embedding-data/sentence-compression", "dataset:embedding-data/flickr30k-captions", "dataset:embedding-data/altlex", "dataset:embedding-data/simple-wiki", "dataset:embedding-data/QQP", "dataset:embedding-data/SPECTER", "dataset:embedding-data/PAQ_pairs", "dataset:embedding-data/WikiAnswers", "arxiv:1904.06472", "arxiv:2102.07033", "arxiv:2104.08727", "arxiv:1704.05179", "arxiv:1810.09305", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
--- language: en license: apache-2.0 library_name: sentence-transformers tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - s2orc - flax-sentence-embeddings/stackexchange_xml - ms_marco - gooaq - yahoo_answers_topics - code_search_net - search_qa - eli5 - snli - multi_nli - wikihow - natural_questions - trivia_qa - embedding-data/sentence-compression - embedding-data/flickr30k-captions - embedding-data/altlex - embedding-data/simple-wiki - embedding-data/QQP - embedding-data/SPECTER - embedding-data/PAQ_pairs - embedding-data/WikiAnswers pipeline_tag: sentence-similarity --- # all-distilroberta-v1 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('sentence-transformers/all-distilroberta-v1') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch import torch.nn.functional as F #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-distilroberta-v1') model = AutoModel.from_pretrained('sentence-transformers/all-distilroberta-v1') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) # Normalize embeddings sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/all-distilroberta-v1) ------ ## Background The project aims to train sentence embedding models on very large sentence level datasets using a self-supervised contrastive learning objective. We used the pretrained [`distilroberta-base`](https://huggingface.co/distilroberta-base) model and fine-tuned in on a 1B sentence pairs dataset. 
We use a contrastive learning objective: given a sentence from the pair, the model should predict which one, out of a set of randomly sampled other sentences, was actually paired with it in our dataset.

We developed this model during the [Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), organized by Hugging Face. We developed this model as part of the project: [Train the Best Sentence Embedding Model Ever with 1B Training Pairs](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). We benefited from efficient hardware infrastructure to run the project: 7 TPUs v3-8, as well as support from Google's Flax, JAX, and Cloud team members on efficient deep learning frameworks.

## Intended uses

Our model is intended to be used as a sentence and short paragraph encoder. Given an input text, it outputs a vector which captures the semantic information. The sentence vector may be used for information retrieval, clustering or sentence similarity tasks.

By default, input text longer than 128 word pieces is truncated.

## Training procedure

### Pre-training

We use the pretrained [`distilroberta-base`](https://huggingface.co/distilroberta-base) model. Please refer to the model card for more detailed information about the pre-training procedure.

### Fine-tuning

We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity for each possible sentence pair in the batch. We then apply the cross-entropy loss by comparing with the true pairs. A short illustrative sketch of this objective is included after the training data table below.

#### Hyper parameters

We trained our model on a TPU v3-8. We trained the model for 920k steps using a batch size of 512 (64 per TPU core). We used a learning rate warm-up of 500 steps. The sequence length was limited to 128 tokens. We used the AdamW optimizer with a 2e-5 learning rate. The full training script is accessible in this current repository: `train_script.py`.

#### Training data

We use the concatenation of multiple datasets to fine-tune our model. The total number of sentence pairs is above 1 billion sentences. We sampled each dataset given a weighted probability, the configuration of which is detailed in the `data_config.json` file.
| Dataset | Paper | Number of training tuples | |--------------------------------------------------------|:----------------------------------------:|:--------------------------:| | [Reddit comments (2015-2018)](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 | | [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Abstracts) | [paper](https://aclanthology.org/2020.acl-main.447/) | 116,288,806 | | [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) Duplicate question pairs | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 | | [PAQ](https://github.com/facebookresearch/PAQ) (Question, Answer) pairs | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 | | [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Titles) | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 | | [S2ORC](https://github.com/allenai/s2orc) (Title, Abstract) | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 | | [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Body) pairs | - | 25,316,456 | | [MS MARCO](https://microsoft.github.io/msmarco/) triplets | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 | | [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 | | [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,151,414 | | [COCO](https://cocodataset.org/#home) Image captions | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395| | [SPECTER](https://github.com/allenai/specter) citation triplets | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Question, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Question) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 | | [SearchQA](https://huggingface.co/datasets/search_qa) | [paper](https://arxiv.org/abs/1704.05179) | 582,261 | | [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 | | [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 | | [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles) | | 304,525 | | AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 | | [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (bodies) | | 250,519 | | [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles+bodies) | | 250,460 | | [Sentence 
Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 | | [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 | | [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 | | [Quora Question Triplets](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 | | [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 | | [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 | | [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 | | [TriviaQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 | | **Total** | | **1,124,818,467** |
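As referenced in the fine-tuning section above, the contrastive objective (cosine similarities over in-batch pairs followed by a cross-entropy loss against the true pairs) corresponds to what `sentence-transformers` exposes as `MultipleNegativesRankingLoss`. The sketch below uses toy data and settings and is not the original `train_script.py`.

```python
# Illustrative sketch of the contrastive fine-tuning objective described above
# (in-batch negatives, cross-entropy over cosine similarities). Toy data and
# hyperparameters; not the original train_script.py.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer("distilroberta-base")  # start from the pre-trained backbone

train_examples = [
    InputExample(texts=["How do I bake bread?", "Simple instructions for baking bread at home"]),
    InputExample(texts=["What is the capital of France?", "Paris is the capital of France"]),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.MultipleNegativesRankingLoss(model)

model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=10)
```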
facebook/musicgen-medium
facebook
"2023-11-17T15:25:23Z"
344,790
62
transformers
[ "transformers", "pytorch", "musicgen", "text-to-audio", "arxiv:2306.05284", "license:cc-by-nc-4.0", "endpoints_compatible", "has_space", "region:us" ]
text-to-audio
"2023-06-08T17:28:18Z"
---
inference: true
tags:
- musicgen
license: cc-by-nc-4.0
pipeline_tag: text-to-audio
widget:
- text: a funky house with 80s hip hop vibes
  example_title: Prompt 1
- text: a chill song with influences from lofi, chillstep and downtempo
  example_title: Prompt 2
- text: a catchy beat for a podcast intro
  example_title: Prompt 3
---

# MusicGen - Medium - 1.5B

MusicGen is a text-to-music model capable of generating high-quality music samples conditioned on text descriptions or audio prompts.
It is a single stage auto-regressive Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz.
Unlike existing methods like MusicLM, MusicGen doesn't require a self-supervised semantic representation, and it generates all 4 codebooks in one pass.
By introducing a small delay between the codebooks, we show we can predict them in parallel, thus having only 50 auto-regressive steps per second of audio.

MusicGen was published in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by *Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi, Alexandre Défossez*.

Four checkpoints are released:
- [small](https://huggingface.co/facebook/musicgen-small)
- [**medium** (this checkpoint)](https://huggingface.co/facebook/musicgen-medium)
- [large](https://huggingface.co/facebook/musicgen-large)
- [melody](https://huggingface.co/facebook/musicgen-melody)

## Example

Try out MusicGen yourself!

* Audiocraft Colab: <a target="_blank" href="https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
* Hugging Face Colab: <a target="_blank" href="https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/MusicGen.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a>
* Hugging Face Demo: <a target="_blank" href="https://huggingface.co/spaces/facebook/MusicGen"> <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="Open in HuggingFace"/> </a>

## 🤗 Transformers Usage

You can run MusicGen locally with the 🤗 Transformers library from version 4.31.0 onwards.

1. First install the 🤗 [Transformers library](https://github.com/huggingface/transformers) and scipy:

```
pip install --upgrade pip
pip install --upgrade transformers scipy
```

2. Run inference via the `Text-to-Audio` (TTA) pipeline. You can infer the MusicGen model via the TTA pipeline in just a few lines of code!

```python
from transformers import pipeline
import scipy

synthesiser = pipeline("text-to-audio", "facebook/musicgen-medium")

music = synthesiser("lo-fi music with a soothing melody", forward_params={"do_sample": True})

scipy.io.wavfile.write("musicgen_out.wav", rate=music["sampling_rate"], data=music["audio"])
```

3. Run inference via the Transformers modelling code. You can use the processor + generate code to convert text into a mono 32 kHz audio waveform for more fine-grained control.

```python
from transformers import AutoProcessor, MusicgenForConditionalGeneration

processor = AutoProcessor.from_pretrained("facebook/musicgen-medium")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-medium")

inputs = processor(
    text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
    padding=True,
    return_tensors="pt",
)

audio_values = model.generate(**inputs, max_new_tokens=256)
```
4. Listen to the audio samples either in an ipynb notebook:

```python
from IPython.display import Audio

sampling_rate = model.config.audio_encoder.sampling_rate
Audio(audio_values[0].numpy(), rate=sampling_rate)
```

Or save them as a `.wav` file using a third-party library, e.g. `scipy`:

```python
import scipy

sampling_rate = model.config.audio_encoder.sampling_rate
scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=audio_values[0, 0].numpy())
```

For more details on using the MusicGen model for inference using the 🤗 Transformers library, refer to the [MusicGen docs](https://huggingface.co/docs/transformers/model_doc/musicgen).

## Audiocraft Usage

You can also run MusicGen locally through the original [Audiocraft library](https://github.com/facebookresearch/audiocraft):

1. First install the [`audiocraft` library](https://github.com/facebookresearch/audiocraft):

```
pip install git+https://github.com/facebookresearch/audiocraft.git
```

2. Make sure to have [`ffmpeg`](https://ffmpeg.org/download.html) installed:

```
apt-get install ffmpeg
```

3. Run the following Python code:

```py
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained("medium")
model.set_generation_params(duration=8)  # generate 8 seconds.

descriptions = ["happy rock", "energetic EDM"]

wav = model.generate(descriptions)  # generates 2 samples.

for idx, one_wav in enumerate(wav):
    # Will save under {idx}.wav, with loudness normalization at -14 db LUFS.
    audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness")
```

## Model details

**Organization developing the model:** The FAIR team of Meta AI.

**Model date:** MusicGen was trained between April 2023 and May 2023.

**Model version:** This is the version 1 of the model.

**Model type:** MusicGen consists of an EnCodec model for audio tokenization and an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters; and two variants: a model trained for the text-to-music generation task and a model trained for melody-guided music generation.

**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284).

**Citation details:**
```
@misc{copet2023simple,
      title={Simple and Controllable Music Generation},
      author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez},
      year={2023},
      eprint={2306.05284},
      archivePrefix={arXiv},
      primaryClass={cs.SD}
}
```

**License:** Code is released under MIT, model weights are released under CC-BY-NC 4.0.

**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue.
## Intended use

**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including:

- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science
- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs

**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateurs seeking to better understand those models.

**Out-of-scope use cases:** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.

## Metrics

**Model performance measures:** We used the following objective measures to evaluate the model on a standard music benchmark:

- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish)
- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST)
- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model

Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes:

- Overall quality of the music samples;
- Text relevance to the provided text input;
- Adherence to the melody for melody-guided music generation.

More details on performance measures and human studies can be found in the paper.

**Decision thresholds:** Not applicable.

## Evaluation datasets

The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set.

## Training datasets

The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing.

## Evaluation results

Below are the objective metrics obtained on MusicCaps with the released model. Note that for the publicly released models, we had all the datasets go through a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs), in order to keep only the instrumental part. This explains the difference in objective metrics with the models used in the paper.

| Model | Frechet Audio Distance | KLD | Text Consistency | Chroma Cosine Similarity |
|---|---|---|---|---|
| facebook/musicgen-small | 4.88 | 1.42 | 0.27 | - |
| **facebook/musicgen-medium** | 5.14 | 1.38 | 0.28 | - |
| facebook/musicgen-large | 5.48 | 1.37 | 0.28 | - |
| facebook/musicgen-melody | 4.93 | 1.41 | 0.27 | 0.44 |

More information can be found in the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284), in the Results section.
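For readers unfamiliar with the first metric above, the Frechet Audio Distance compares Gaussians fitted to embeddings of real and generated audio (here, VGGish features). The sketch below shows the standard Frechet computation; it is illustrative only, assumes you have already extracted two `(n_samples, dim)` embedding arrays, and is not the authors' evaluation code.

```python
import numpy as np
from scipy import linalg


def frechet_audio_distance(real_emb: np.ndarray, gen_emb: np.ndarray) -> float:
    """Frechet distance between Gaussians fitted to two sets of audio embeddings."""
    mu_r, mu_g = real_emb.mean(axis=0), gen_emb.mean(axis=0)
    cov_r = np.cov(real_emb, rowvar=False)
    cov_g = np.cov(gen_emb, rowvar=False)

    # Matrix square root of the product of the two covariance matrices.
    covmean, _ = linalg.sqrtm(cov_r @ cov_g, disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # discard tiny imaginary parts from numerical error

    diff = mu_r - mu_g
    return float(diff @ diff + np.trace(cov_r + cov_g - 2.0 * covmean))
```

Lower values indicate that the embedding statistics of the generated audio are closer to those of real music.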
## Limitations and biases

**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data; we believe that scaling the model on larger datasets can further improve its performance.

**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using a state-of-the-art music source separation method, namely the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs).

**Limitations:**

- The model is not able to generate realistic vocals.
- The model has been trained with English descriptions and will not perform as well in other languages.
- The model does not perform equally well for all music styles and cultures.
- The model sometimes generates end of songs, collapsing to silence.
- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results.

**Biases:** The source of data is potentially lacking diversity and not all music cultures are equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive.

**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow broadening the application to new and more representative data.

**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks.
sentence-transformers/msmarco-distilbert-cos-v5
sentence-transformers
"2024-03-27T11:28:45Z"
341,494
8
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "safetensors", "distilbert", "feature-extraction", "sentence-similarity", "transformers", "en", "arxiv:1908.10084", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
--- language: - en library_name: sentence-transformers tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers pipeline_tag: sentence-similarity --- # msmarco-distilbert-cos-v5 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and was designed for **semantic search**. It has been trained on 500k (query, answer) pairs from the [MS MARCO Passages dataset](https://github.com/microsoft/MSMARCO-Passage-Ranking). For an introduction to semantic search, have a look at: [SBERT.net - Semantic Search](https://www.sbert.net/examples/applications/semantic-search/README.html) ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer, util query = "How many people live in London?" docs = ["Around 9 Million people live in London", "London is known for its financial district"] #Load the model model = SentenceTransformer('sentence-transformers/msmarco-distilbert-cos-v5') #Encode query and documents query_emb = model.encode(query) doc_emb = model.encode(docs) #Compute dot score between query and all document embeddings scores = util.dot_score(query_emb, doc_emb)[0].cpu().tolist() #Combine docs & scores doc_score_pairs = list(zip(docs, scores)) #Sort by decreasing score doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True) #Output passages & scores for doc, score in doc_score_pairs: print(score, doc) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the correct pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch import torch.nn.functional as F #Mean Pooling - Take average of all tokens def mean_pooling(model_output, attention_mask): token_embeddings = model_output.last_hidden_state #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) #Encode text def encode(texts): # Tokenize sentences encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input, return_dict=True) # Perform pooling embeddings = mean_pooling(model_output, encoded_input['attention_mask']) # Normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) return embeddings # Sentences we want sentence embeddings for query = "How many people live in London?" 
docs = ["Around 9 Million people live in London", "London is known for its financial district"]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/msmarco-distilbert-cos-v5")
model = AutoModel.from_pretrained("sentence-transformers/msmarco-distilbert-cos-v5")

#Encode query and docs
query_emb = encode(query)
doc_emb = encode(docs)

#Compute dot score between query and all document embeddings
scores = torch.mm(query_emb, doc_emb.transpose(0, 1))[0].cpu().tolist()

#Combine docs & scores
doc_score_pairs = list(zip(docs, scores))

#Sort by decreasing score
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)

#Output passages & scores
for doc, score in doc_score_pairs:
    print(score, doc)
```

## Technical Details

The following are some technical details on how this model must be used:

| Setting | Value |
| --- | :---: |
| Dimensions | 768 |
| Produces normalized embeddings | Yes |
| Pooling-Method | Mean pooling |
| Suitable score functions | dot-product (`util.dot_score`), cosine-similarity (`util.cos_sim`), or euclidean distance |

Note: When loaded with `sentence-transformers`, this model produces normalized embeddings with length 1. In that case, dot-product and cosine-similarity are equivalent. Dot-product is preferred as it is faster. For normalized embeddings, euclidean distance is a monotone function of the dot-product, so it ranks results identically and can also be used.

## Citing & Authors

This model was trained by [sentence-transformers](https://www.sbert.net/).

If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084):
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "http://arxiv.org/abs/1908.10084",
}
```
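As a quick sanity check of the score-function note in the Technical Details section above, the short sketch below (illustrative only) shows that cosine-similarity and dot-product return the same score for this model, since its embeddings are normalized to unit length:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("sentence-transformers/msmarco-distilbert-cos-v5")

emb = model.encode(
    ["How many people live in London?", "Around 9 Million people live in London"],
    convert_to_tensor=True,
)

# Both calls should print (near-)identical values for this model.
print("cosine:", util.cos_sim(emb[0], emb[1]).item())
print("dot:   ", util.dot_score(emb[0], emb[1]).item())
```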
nvidia/parakeet-tdt-1.1b
nvidia
"2024-01-31T16:15:54Z"
340,341
55
nemo
[ "nemo", "automatic-speech-recognition", "speech", "audio", "Transducer", "TDT", "FastConformer", "Conformer", "pytorch", "NeMo", "hf-asr-leaderboard", "en", "dataset:librispeech_asr", "dataset:fisher_corpus", "dataset:Switchboard-1", "dataset:WSJ-0", "dataset:WSJ-1", "dataset:National-Singapore-Corpus-Part-1", "dataset:National-Singapore-Corpus-Part-6", "dataset:vctk", "dataset:voxpopuli", "dataset:europarl", "dataset:multilingual_librispeech", "dataset:mozilla-foundation/common_voice_8_0", "dataset:MLCommons/peoples_speech", "arxiv:2304.06795", "arxiv:2305.05084", "license:cc-by-4.0", "model-index", "has_space", "region:us" ]
automatic-speech-recognition
"2024-01-25T02:05:06Z"
--- language: - en library_name: nemo datasets: - librispeech_asr - fisher_corpus - Switchboard-1 - WSJ-0 - WSJ-1 - National-Singapore-Corpus-Part-1 - National-Singapore-Corpus-Part-6 - vctk - voxpopuli - europarl - multilingual_librispeech - mozilla-foundation/common_voice_8_0 - MLCommons/peoples_speech thumbnail: null tags: - automatic-speech-recognition - speech - audio - Transducer - TDT - FastConformer - Conformer - pytorch - NeMo - hf-asr-leaderboard license: cc-by-4.0 widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac model-index: - name: parakeet_tdt_1.1b results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: AMI (Meetings test) type: edinburghcstr/ami config: ihm split: test args: language: en metrics: - name: Test WER type: wer value: 15.90 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Earnings-22 type: revdotcom/earnings22 split: test args: language: en metrics: - name: Test WER type: wer value: 14.65 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: GigaSpeech type: speechcolab/gigaspeech split: test args: language: en metrics: - name: Test WER type: wer value: 9.55 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (clean) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: 1.39 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (other) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: 2.62 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: SPGI Speech type: kensho/spgispeech config: test split: test args: language: en metrics: - name: Test WER type: wer value: 3.42 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: tedlium-v3 type: LIUM/tedlium config: release1 split: test args: language: en metrics: - name: Test WER type: wer value: 3.56 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Vox Populi type: facebook/voxpopuli config: en split: test args: language: en metrics: - name: Test WER type: wer value: 5.48 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Mozilla Common Voice 9.0 type: mozilla-foundation/common_voice_9_0 config: en split: test args: language: en metrics: - name: Test WER type: wer value: 5.97 metrics: - wer pipeline_tag: automatic-speech-recognition --- # Parakeet TDT 1.1B (en) <style> img { display: inline; } </style> [![Model architecture](https://img.shields.io/badge/Model_Arch-FastConformer--TDT-lightgrey#model-badge)](#model-architecture) | [![Model size](https://img.shields.io/badge/Params-1.1B-lightgrey#model-badge)](#model-architecture) | [![Language](https://img.shields.io/badge/Language-en-lightgrey#model-badge)](#datasets) `parakeet-tdt-1.1b` is an ASR model that transcribes speech in lower case English alphabet. This model is jointly developed by [NVIDIA NeMo](https://github.com/NVIDIA/NeMo) and [Suno.ai](https://www.suno.ai/) teams. It is an XXL version of FastConformer [1] TDT [2] (around 1.1B parameters) model. 
See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#fast-conformer) for complete architecture details.

## NVIDIA NeMo: Training

To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed the latest PyTorch version.
```
pip install nemo_toolkit['all']
```

## How to Use this Model

The model is available for use in the NeMo toolkit [4], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset.

### Automatically instantiate the model

```python
import nemo.collections.asr as nemo_asr
asr_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained(model_name="nvidia/parakeet-tdt-1.1b")
```

### Transcribing using Python
First, let's get a sample:
```
wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav
```
Then simply do:
```
asr_model.transcribe(['2086-149220-0033.wav'])
```

### Transcribing many audio files

```shell
python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py pretrained_name="nvidia/parakeet-tdt-1.1b" audio_dir="<DIRECTORY CONTAINING AUDIO FILES>"
```

### Input

This model accepts 16000 Hz mono-channel audio (wav files) as input.

### Output

This model provides transcribed speech as a string for a given audio sample.

## Model Architecture

This model uses a FastConformer-TDT architecture. FastConformer [1] is an optimized version of the Conformer model with 8x depthwise-separable convolutional downsampling. You may find more information on the details of FastConformer here: [Fast-Conformer Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#fast-conformer).

TDT (Token-and-Duration Transducer) [2] is a generalization of conventional Transducers obtained by decoupling token and duration predictions. Unlike conventional Transducers, which produce a lot of blanks during inference, a TDT model can skip the majority of blank predictions by using the duration output (up to 4 frames for this parakeet-tdt-1.1b model), thus bringing a significant inference speed-up. The details of TDT can be found here: [Efficient Sequence Transduction by Jointly Predicting Tokens and Durations](https://arxiv.org/abs/2304.06795).

## Training

The NeMo toolkit [4] was used for training the models for several hundred epochs. These models are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_transducer/speech_to_text_rnnt_bpe.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/fastconformer/fast-conformer_transducer_bpe.yaml).

The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py).

### Datasets

The model was trained on 64K hours of English speech collected and prepared by NVIDIA NeMo and Suno teams.
The training dataset consists of a private subset with 40K hours of English speech plus 24K hours from the following public datasets:

- Librispeech 960 hours of English speech
- Fisher Corpus
- Switchboard-1 Dataset
- WSJ-0 and WSJ-1
- National Speech Corpus (Part 1, Part 6)
- VCTK
- VoxPopuli (EN)
- Europarl-ASR (EN)
- Multilingual Librispeech (MLS EN) - 2,000 hour subset
- Mozilla Common Voice (v7.0)
- People's Speech - 12,000 hour subset

## Performance

The performance of Automatic Speech Recognition models is measured using Word Error Rate (WER). Since this model is trained on multiple domains and a much larger corpus, it will generally perform better at transcribing audio in general.

The following table summarizes the performance of the available models in this collection with the Transducer decoder. Performances of the ASR models are reported in terms of Word Error Rate (WER%) with greedy decoding.

|**Version**|**Tokenizer**|**Vocabulary Size**|**AMI**|**Earnings-22**|**Giga Speech**|**LS test-clean**|**LS test-other**|**SPGI Speech**|**TEDLIUM-v3**|**Vox Populi**|**Common Voice**|
|---------|-----------------------|-----------------|-------|---------------|---------------|-----------------|-----------------|---------------|--------------|--------------|----------------|
| 1.22.0 | SentencePiece Unigram | 1024 | 15.90 | 14.65 | 9.55 | 1.39 | 2.62 | 3.42 | 3.56 | 5.48 | 5.97 |

These are greedy WER numbers without external LM. More details on evaluation can be found at [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard).

## NVIDIA Riva: Deployment

[NVIDIA Riva](https://developer.nvidia.com/riva) is an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, on edge, and embedded.
Additionally, Riva provides:

* World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours
* Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization
* Streaming speech recognition, Kubernetes compatible scaling, and enterprise-grade support

Although this model isn’t supported yet by Riva, the [list of supported models is here](https://huggingface.co/models?other=Riva).
Check out [Riva live demo](https://developer.nvidia.com/riva#demos).

## References

[1] [Fast Conformer with Linearly Scalable Attention for Efficient Speech Recognition](https://arxiv.org/abs/2305.05084)

[2] [Efficient Sequence Transduction by Jointly Predicting Tokens and Durations](https://arxiv.org/abs/2304.06795)

[3] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece)

[4] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)

[5] [Suno.ai](https://suno.ai/)

[6] [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard)

## License

License to use this model is covered by the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/). By downloading the public and release version of the model, you accept the terms and conditions of the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) license.
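For reference, the word error rates quoted above follow the standard WER definition: the number of word-level substitutions, deletions and insertions needed to turn the hypothesis into the reference, divided by the number of reference words. A minimal sketch using the third-party `jiwer` package (an illustrative assumption, not part of NeMo) is shown below:

```python
import jiwer

reference = "the quick brown fox jumps over the lazy dog"
hypothesis = "the quick brown fox jumped over a lazy dog"

# WER = (substitutions + deletions + insertions) / number of reference words
print(f"WER: {jiwer.wer(reference, hypothesis):.2%}")  # 2 substitutions out of 9 words -> ~22%
```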
latent-consistency/lcm-lora-sdv1-5
latent-consistency
"2023-11-16T16:01:30Z"
339,635
410
diffusers
[ "diffusers", "lora", "text-to-image", "arxiv:2311.05556", "base_model:runwayml/stable-diffusion-v1-5", "license:openrail++", "has_space", "region:us" ]
text-to-image
"2023-11-07T11:20:24Z"
---
library_name: diffusers
base_model: runwayml/stable-diffusion-v1-5
tags:
- lora
- text-to-image
license: openrail++
inference: false
---

# Latent Consistency Model (LCM) LoRA: SDv1-5

Latent Consistency Model (LCM) LoRA was proposed in [LCM-LoRA: A universal Stable-Diffusion Acceleration Module](https://arxiv.org/abs/2311.05556) by *Simian Luo, Yiqin Tan, Suraj Patil, Daniel Gu et al.*

It is a distilled consistency adapter for [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) that allows reducing the number of inference steps to only **2 - 8 steps**.

| Model                                                                             | Params / M |
|-----------------------------------------------------------------------------------|------------|
| [**lcm-lora-sdv1-5**](https://huggingface.co/latent-consistency/lcm-lora-sdv1-5)   | **67.5**   |
| [lcm-lora-ssd-1b](https://huggingface.co/latent-consistency/lcm-lora-ssd-1b)       | 105        |
| [lcm-lora-sdxl](https://huggingface.co/latent-consistency/lcm-lora-sdxl)           | 197        |

## Usage

LCM-LoRA is supported in the 🤗 Hugging Face Diffusers library from version v0.23.0 onwards. To run the model, first install the latest version of the Diffusers library as well as `peft`, `accelerate` and `transformers`:

```bash
pip install --upgrade pip
pip install --upgrade diffusers transformers accelerate peft
```

***Note: For detailed usage examples we recommend you check out our official [LCM-LoRA docs](https://huggingface.co/docs/diffusers/main/en/using-diffusers/inference_with_lcm_lora)***

### Text-to-Image

The adapter can be loaded with SDv1-5 or derivatives. Here we use [`Lykon/dreamshaper-7`](https://huggingface.co/Lykon/dreamshaper-7). Next, the scheduler needs to be changed to [`LCMScheduler`](https://huggingface.co/docs/diffusers/v0.22.3/en/api/schedulers/lcm#diffusers.LCMScheduler) and we can reduce the number of inference steps to just 2 to 8 steps. Please make sure to either disable `guidance_scale` or use values between 1.0 and 2.0.

```python
import torch
from diffusers import LCMScheduler, AutoPipelineForText2Image

model_id = "Lykon/dreamshaper-7"
adapter_id = "latent-consistency/lcm-lora-sdv1-5"

pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# load and fuse lcm lora
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()

prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"

# disable guidance_scale by passing 0
image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0).images[0]
```

![](./image.png)

### Image-to-Image

LCM-LoRA can be applied to image-to-image tasks too. Let's look at how we can perform image-to-image generation with LCMs. For this example we'll use the [dreamshaper-7](https://huggingface.co/Lykon/dreamshaper-7) model and the LCM-LoRA for `stable-diffusion-v1-5`.
```python import torch from diffusers import AutoPipelineForImage2Image, LCMScheduler from diffusers.utils import make_image_grid, load_image pipe = AutoPipelineForImage2Image.from_pretrained( "Lykon/dreamshaper-7", torch_dtype=torch.float16, variant="fp16", ).to("cuda") # set scheduler pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) # load LCM-LoRA pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") pipe.fuse_lora() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline generator = torch.manual_seed(0) image = pipe( prompt, image=init_image, num_inference_steps=4, guidance_scale=1, strength=0.6, generator=generator ).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_i2i.png) ### Inpainting LCM-LoRA can be used for inpainting as well. ```python import torch from diffusers import AutoPipelineForInpainting, LCMScheduler from diffusers.utils import load_image, make_image_grid pipe = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16", ).to("cuda") # set scheduler pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) # load LCM-LoRA pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") pipe.fuse_lora() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") # generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" generator = torch.manual_seed(0) image = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=4, guidance_scale=4, ).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_inpainting.png) ### ControlNet For this example, we'll use the SD-v1-5 model and the LCM-LoRA for SD-v1-5 with canny ControlNet. 
```python
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image, make_image_grid

image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((512, 512))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
    variant="fp16"
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

generator = torch.manual_seed(0)
image = pipe(
    "the mona lisa",
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    controlnet_conditioning_scale=0.8,
    cross_attention_kwargs={"scale": 1},
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_controlnet.png)

## Speed Benchmark

TODO

## Training

TODO
facebook/wav2vec2-base
facebook
"2021-12-28T12:44:31Z"
338,120
55
transformers
[ "transformers", "pytorch", "wav2vec2", "pretraining", "speech", "en", "dataset:librispeech_asr", "arxiv:2006.11477", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
null
"2022-03-02T23:29:05Z"
---
language: en
datasets:
- librispeech_asr
tags:
- speech
license: apache-2.0
---

# Wav2Vec2-Base

[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)

The base model pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz.

**Note**: This model does not have a tokenizer as it was pretrained on audio alone. In order to use this model for **speech recognition**, a tokenizer should be created and the model should be fine-tuned on labeled text data. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for a more detailed explanation of how to fine-tune the model.

[Paper](https://arxiv.org/abs/2006.11477)

Authors: Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli

**Abstract**

We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.

The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.

# Usage

See [this notebook](https://colab.research.google.com/drive/1FjTsqbYKphl9kL-eILgUc-bl4zVThL8F?usp=sharing) for more information on how to fine-tune the model.
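Since the checkpoint ships without a tokenizer, the most direct way to use it out of the box is as a feature extractor. The sketch below is a minimal, illustrative example of pulling contextual representations from 16 kHz audio with 🤗 Transformers; the zero-filled waveform is only a stand-in for real speech.

```python
import numpy as np
import torch
from transformers import AutoFeatureExtractor, Wav2Vec2Model

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base")

# one second of (silent) 16 kHz audio as a placeholder for a real waveform
waveform = np.zeros(16000, dtype=np.float32)

inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# roughly one 768-dimensional frame every 20 ms of audio
print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, 49, 768])
```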
timm/resnetv2_50x1_bit.goog_in21k
timm
"2024-02-10T23:35:26Z"
337,408
4
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-21k", "arxiv:1912.11370", "arxiv:1603.05027", "license:apache-2.0", "region:us" ]
image-classification
"2023-03-22T20:57:10Z"
---
license: apache-2.0
library_name: timm
tags:
- image-classification
- timm
datasets:
- imagenet-21k
---

# Model card for resnetv2_50x1_bit.goog_in21k

A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Trained on ImageNet-21k by paper authors.

This model uses:
* Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN).

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 68.3
  - GMACs: 4.3
  - Activations (M): 11.1
  - Image size: 224 x 224
- **Papers:**
  - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370
  - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027
- **Dataset:** ImageNet-21k
- **Original:** https://github.com/google-research/big_transfer

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('resnetv2_50x1_bit.goog_in21k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'resnetv2_50x1_bit.goog_in21k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 112, 112])
    #  torch.Size([1, 256, 56, 56])
    #  torch.Size([1, 512, 28, 28])
    #  torch.Size([1, 1024, 14, 14])
    #  torch.Size([1, 2048, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'resnetv2_50x1_bit.goog_in21k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2048, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model
results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```