Dataset schema (column, dtype, observed range):

| Column | Dtype | Range / Values |
|---------------|----------------|-------------------|
| modelId | string | lengths 5–122 |
| author | string | lengths 2–42 |
| last_modified | unknown | — |
| downloads | int64 | 0–435M |
| likes | int64 | 0–6.52k |
| library_name | string (class) | 345 values |
| tags | sequence | lengths 1–4.05k |
| pipeline_tag | string (class) | 51 values |
| createdAt | unknown | — |
| card | string | lengths 1–913k |
Salesforce/blip2-itm-vit-g
Salesforce
"2024-06-28T14:45:48Z"
97,187
0
transformers
[ "transformers", "pytorch", "safetensors", "blip-2", "zero-shot-image-classification", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
zero-shot-image-classification
"2023-08-23T13:32:06Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
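Since the card above is an empty auto-generated template, here is a minimal usage sketch inferred purely from the row metadata (pipeline tag `zero-shot-image-classification`). Whether this checkpoint works out of the box with the generic pipeline is an assumption, and the image URL and labels are illustrative only:

```python
from transformers import pipeline

# Assumption: the checkpoint is usable through the generic zero-shot
# image-classification pipeline, as suggested by the pipeline_tag in this row.
classifier = pipeline("zero-shot-image-classification", model="Salesforce/blip2-itm-vit-g")

result = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",  # illustrative test image
    candidate_labels=["two parrots", "a dog", "a city skyline"],
)
print(result)
```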
microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext
microsoft
"2023-11-06T18:03:43Z"
97,039
185
transformers
[ "transformers", "pytorch", "jax", "bert", "fill-mask", "exbert", "en", "arxiv:2007.15779", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
--- language: en tags: - exbert license: mit widget: - text: "[MASK] is a tumor suppressor gene." --- ## MSR BiomedBERT (abstracts + full text) <div style="border: 2px solid orange; border-radius:10px; padding:0px 10px; width: fit-content;"> * This model was previously named **"PubMedBERT (abstracts + full text)"**. * You can either adopt the new model name "microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext" or update your `transformers` library to version 4.22+ if you need to refer to the old name. </div> Pretraining large neural language models, such as BERT, has led to impressive gains on many natural language processing (NLP) tasks. However, most pretraining efforts focus on general domain corpora, such as newswire and Web. A prevailing assumption is that even domain-specific pretraining can benefit by starting from general-domain language models. [Recent work](https://arxiv.org/abs/2007.15779) shows that for domains with abundant unlabeled text, such as biomedicine, pretraining language models from scratch results in substantial gains over continual pretraining of general-domain language models. BiomedBERT is pretrained from scratch using _abstracts_ from [PubMed](https://pubmed.ncbi.nlm.nih.gov/) and _full-text_ articles from [PubMedCentral](https://www.ncbi.nlm.nih.gov/pmc/). This model achieves state-of-the-art performance on many biomedical NLP tasks, and currently holds the top score on the [Biomedical Language Understanding and Reasoning Benchmark](https://aka.ms/BLURB). ## Citation If you find BiomedBERT useful in your research, please cite the following paper: ```latex @misc{pubmedbert, author = {Yu Gu and Robert Tinn and Hao Cheng and Michael Lucas and Naoto Usuyama and Xiaodong Liu and Tristan Naumann and Jianfeng Gao and Hoifung Poon}, title = {Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing}, year = {2020}, eprint = {arXiv:2007.15779}, } ``` <a href="https://huggingface.co/exbert/?model=microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext&modelKind=bidirectional&sentence=Gefitinib%20is%20an%20EGFR%20tyrosine%20kinase%20inhibitor,%20which%20is%20often%20used%20for%20breast%20cancer%20and%20NSCLC%20treatment.&layer=3&heads=..0,1,2,3,4,5,6,7,8,9,10,11&threshold=0.7&tokenInd=17&tokenSide=right&maskInds=..&hideClsSep=true"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a>
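As a minimal usage sketch (not part of the original card), the widget example from the card's metadata can be reproduced with the standard `fill-mask` pipeline:

```python
from transformers import pipeline

# Fill-mask sketch using the widget example declared in the card's front matter.
unmasker = pipeline(
    "fill-mask",
    model="microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext",
)
for pred in unmasker("[MASK] is a tumor suppressor gene."):
    print(f"{pred['token_str']}\t{pred['score']:.3f}")
```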
dmis-lab/biobert-base-cased-v1.2
dmis-lab
"2021-06-24T02:54:58Z"
96,988
41
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
Entry not found
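The card itself is missing ("Entry not found"), but the row's tags (`bert`, `fill-mask`) suggest standard masked-language-model usage. A minimal sketch under that assumption; the example sentence is illustrative:

```python
from transformers import pipeline

# Assumption: standard BERT fill-mask usage, inferred only from the row's tags
# (the model card itself is missing from the Hub).
unmasker = pipeline("fill-mask", model="dmis-lab/biobert-base-cased-v1.2")
print(unmasker("The patient was treated with [MASK] for hypertension."))
```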
google/flan-ul2
google
"2023-11-07T15:11:54Z"
96,826
550
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "flan-ul2", "en", "fr", "ro", "de", "multilingual", "dataset:svakulenk0/qrecc", "dataset:taskmaster2", "dataset:djaym7/wiki_dialog", "dataset:deepmind/code_contests", "dataset:lambada", "dataset:gsm8k", "dataset:aqua_rat", "dataset:esnli", "dataset:quasc", "dataset:qed", "dataset:c4", "arxiv:2205.05131", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
"2023-03-03T10:37:27Z"
--- language: - en - fr - ro - de - multilingual widget: - text: 'Translate to German: My name is Arthur' example_title: Translation - text: >- Please answer to the following question. Who is going to be the next Ballon d'or? example_title: Question Answering - text: >- Q: Can Geoffrey Hinton have a conversation with George Washington? Give the rationale before answering. example_title: Logical reasoning - text: >- Please answer the following question. What is the boiling point of Nitrogen? example_title: Scientific knowledge - text: >- Answer the following yes/no question. Can you write a whole Haiku in a single tweet? example_title: Yes/no question - text: >- Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet? example_title: Reasoning task - text: 'Q: ( False or not False or False ) is? A: Let''s think step by step' example_title: Boolean Expressions - text: >- The square root of x is the cube root of y. What is y to the power of 2, if x = 4? example_title: Math reasoning - text: >- Premise: At my age you will probably have learnt one lesson. Hypothesis: It's not certain how many lessons you'll learn by your thirties. Does the premise entail the hypothesis? example_title: Premise and hypothesis - text: >- Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apple do they have? example_title: Chain of thought tags: - text2text-generation - flan-ul2 datasets: - svakulenk0/qrecc - taskmaster2 - djaym7/wiki_dialog - deepmind/code_contests - lambada - gsm8k - aqua_rat - esnli - quasc - qed - c4 license: apache-2.0 ---

# Model card for Flan-UL2

![model image](https://raw.githubusercontent.com/google-research/google-research/master/ul2/figs/ul2.png)

# Table of Contents

0. [TL;DR](#TL;DR)
1. [Using the model](#using-the-model)
2. [Results](#results)
3. [Introduction to UL2](#introduction-to-ul2)
4. [Training](#training)
5. [Contribution](#contribution)
6. [Citation](#citation)

# TL;DR

Flan-UL2 is an encoder-decoder model based on the `T5` architecture. It uses the same configuration as the [`UL2 model`](https://huggingface.co/google/ul2) released earlier last year. It was fine-tuned using the "Flan" prompt tuning and dataset collection. According to the original [blog](https://www.yitay.net/blog/flan-ul2-20b), the notable improvements are:

- The original UL2 model was only trained with a receptive field of 512, which made it non-ideal for N-shot prompting where N is large.
- The Flan-UL2 checkpoint uses a receptive field of 2048, which makes it more usable for few-shot in-context learning.
- The original UL2 model also had mode switch tokens that were rather mandatory to get good performance. However, they were a little cumbersome, as this often required some changes during inference or finetuning. In this update/change, we continue training UL2 20B for an additional 100k steps (with a small batch) to forget “mode tokens” before applying Flan instruction tuning. This Flan-UL2 checkpoint does not require mode tokens anymore.

# Using the model

## Converting from T5x to huggingface

You can use the [`convert_t5x_checkpoint_to_pytorch.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py) script and pass the argument `strict = False`. The final layer norm is missing from the original state dictionary, which is why we pass the `strict = False` argument.
```bash
python convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path PATH_TO_T5X_CHECKPOINTS --config_file PATH_TO_CONFIG --pytorch_dump_path PATH_TO_SAVE
```

We used the same config file as [`google/ul2`](https://huggingface.co/google/ul2/blob/main/config.json).

## Running the model

For more efficient memory usage, we advise you to load the model in `8bit` using the `load_in_8bit` flag as follows (works only on GPU):

```python
# pip install accelerate transformers bitsandbytes
from transformers import T5ForConditionalGeneration, AutoTokenizer
import torch

model = T5ForConditionalGeneration.from_pretrained("google/flan-ul2", device_map="auto", load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")

input_string = "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apple do they have?"
inputs = tokenizer(input_string, return_tensors="pt").input_ids.to("cuda")

outputs = model.generate(inputs, max_length=200)
print(tokenizer.decode(outputs[0]))
# <pad> They have 23 - 20 = 3 apples left. They have 3 + 6 = 9 apples. Therefore, the answer is 9.</s>
```

Otherwise, you can load and run the model in `bfloat16` as follows:

```python
# pip install accelerate transformers
from transformers import T5ForConditionalGeneration, AutoTokenizer
import torch

model = T5ForConditionalGeneration.from_pretrained("google/flan-ul2", torch_dtype=torch.bfloat16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")

input_string = "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apple do they have?"
inputs = tokenizer(input_string, return_tensors="pt").input_ids.to("cuda")

outputs = model.generate(inputs, max_length=200)
print(tokenizer.decode(outputs[0]))
# <pad> They have 23 - 20 = 3 apples left. They have 3 + 6 = 9 apples. Therefore, the answer is 9.</s>
```

# Results

## Performance improvement

The reported results are the following:

| | MMLU | BBH | MMLU-CoT | BBH-CoT | Avg |
| :--- | :---: | :---: | :---: | :---: | :---: |
| FLAN-PaLM 62B | 59.6 | 47.5 | 56.9 | 44.9 | 49.9 |
| FLAN-PaLM 540B | 73.5 | 57.9 | 70.9 | 66.3 | 67.2 |
| FLAN-T5-XXL 11B | 55.1 | 45.3 | 48.6 | 41.4 | 47.6 |
| FLAN-UL2 20B | 55.7(+1.1%) | 45.9(+1.3%) | 52.2(+7.4%) | 42.7(+3.1%) | 49.1(+3.2%) |

# Introduction to UL2

This entire section has been copied from the [`google/ul2`](https://huggingface.co/google/ul2) model card and might be subject to change with respect to `flan-ul2`.

UL2 is a unified framework for pretraining models that are universally effective across datasets and setups. UL2 uses Mixture-of-Denoisers (MoD), a pre-training objective that combines diverse pre-training paradigms together. UL2 introduces a notion of mode switching, wherein downstream fine-tuning is associated with specific pre-training schemes.

![model image](https://raw.githubusercontent.com/google-research/google-research/master/ul2/figs/ul2.png)

**Abstract**

Existing pre-trained models are generally geared towards a particular class of problems. To date, there seems to be still no consensus on what the right architecture and pre-training setup should be. This paper presents a unified framework for pre-training models that are universally effective across datasets and setups. We begin by disentangling architectural archetypes with pre-training objectives -- two concepts that are commonly conflated.
Next, we present a generalized and unified perspective for self-supervision in NLP and show how different pre-training objectives can be cast as one another and how interpolating between different objectives can be effective. We then propose Mixture-of-Denoisers (MoD), a pre-training objective that combines diverse pre-training paradigms together. We furthermore introduce a notion of mode switching, wherein downstream fine-tuning is associated with specific pre-training schemes. We conduct extensive ablative experiments to compare multiple pre-training objectives and find that our method pushes the Pareto-frontier by outperforming T5 and/or GPT-like models across multiple diverse setups. Finally, by scaling our model up to 20B parameters, we achieve SOTA performance on 50 well-established supervised NLP tasks ranging from language generation (with automated and human evaluation), language understanding, text classification, question answering, commonsense reasoning, long text reasoning, structured knowledge grounding and information retrieval. Our model also achieves strong results at in-context learning, outperforming 175B GPT-3 on zero-shot SuperGLUE and tripling the performance of T5-XXL on one-shot summarization.

For more information, please take a look at the original paper.

Paper: [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1)

Authors: *Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler*

## Training

### Flan UL2

The Flan-UL2 model was initialized using the `UL2` checkpoints, and was then trained additionally using Flan prompting. This means that the original training corpus is `C4`. In “Scaling Instruction-Finetuned Language Models” (Chung et al.) (also sometimes referred to as the Flan2 paper), the key idea is to train a large language model on a collection of datasets. These datasets are phrased as instructions, which enables generalization across diverse tasks. Flan has been primarily trained on academic tasks. In Flan2, we released a series of T5 models ranging from 200M to 11B parameters that have been instruction tuned with Flan.

The Flan datasets have also been open sourced in “The Flan Collection: Designing Data and Methods for Effective Instruction Tuning” (Longpre et al.). See Google AI Blogpost: “The Flan Collection: Advancing Open Source Methods for Instruction Tuning”.

## UL2 PreTraining

The model is pretrained on the C4 corpus. For pretraining, the model is trained on a total of 1 trillion tokens on C4 (2 million steps) with a batch size of 1024. The sequence length is set to 512/512 for inputs and targets. Dropout is set to 0 during pretraining. Pre-training took slightly more than one month for about 1 trillion tokens. The model has 32 encoder layers and 32 decoder layers, a `dmodel` of 4096 and a `df` of 16384. The dimension of each head is 256, for a total of 16 heads. Our model uses a model parallelism of 8. The same SentencePiece tokenizer as T5, with a vocabulary size of 32,000, is used (click [here](https://huggingface.co/docs/transformers/v4.20.0/en/model_doc/t5#transformers.T5Tokenizer) for more information about the T5 tokenizer).

UL-20B can be interpreted as a model that is quite similar to T5 but trained with a different objective and slightly different scaling knobs. UL-20B was trained using the [Jax](https://github.com/google/jax) and [T5X](https://github.com/google-research/t5x) infrastructure.
The training objective during pretraining is a mixture of different denoising strategies that are explained in the following:

### Mixture of Denoisers

To quote the paper:

> We conjecture that a strong universal model has to be exposed to solving diverse set of problems
> during pre-training. Given that pre-training is done using self-supervision, we argue that such diversity
> should be injected to the objective of the model, otherwise the model might suffer from lack a certain
> ability, like long-coherent text generation.

> Motivated by this, as well as current class of objective functions, we define three main paradigms that
> are used during pre-training:

- **R-Denoiser**: The regular denoising is the standard span corruption introduced in [T5](https://huggingface.co/docs/transformers/v4.20.0/en/model_doc/t5) that uses a range of 2 to 5 tokens as the span length, which masks about 15% of input tokens. These spans are short and potentially useful to acquire knowledge instead of learning to generate fluent text.

- **S-Denoiser**: A specific case of denoising where we observe a strict sequential order when framing the inputs-to-targets task, i.e., prefix language modeling. To do so, we simply partition the input sequence into two sub-sequences of tokens as context and target such that the targets do not rely on future information. This is unlike standard span corruption, where there could be a target token with an earlier position than a context token. Note that similar to the Prefix-LM setup, the context (prefix) retains a bidirectional receptive field. We note that S-Denoising with very short memory or no memory is in similar spirit to standard causal language modeling.

- **X-Denoiser**: An extreme version of denoising where the model must recover a large part of the input, given a small to moderate part of it. This simulates a situation where a model needs to generate a long target from a memory with relatively limited information. To do so, we opt to include examples with aggressive denoising where approximately 50% of the input sequence is masked. This is done by increasing the span length and/or corruption rate. We consider a pre-training task to be extreme if it has a long span (e.g., ≥ 12 tokens) or has a large corruption rate (e.g., ≥ 30%). X-denoising is motivated by being an interpolation between regular span corruption and language-model-like objectives.

See the following diagram for a more visual explanation:

![mixture-of-denoisers](https://raw.githubusercontent.com/google-research/google-research/master/ul2/figs/mod.png)

**Important**: For more details, please see section 3.1.2 of the [paper](https://arxiv.org/pdf/2205.05131v1.pdf).

## Fine-tuning

The model was continuously fine-tuned after N pretraining steps, where N is typically from 50k to 100k. In other words, after each Nk steps of pretraining, the model is finetuned on each downstream task. See section 5.2.2 of the [paper](https://arxiv.org/pdf/2205.05131v1.pdf) for an overview of all datasets that were used for fine-tuning.

As the model is continuously finetuned, finetuning is stopped on a task once it has reached state-of-the-art, to save compute. In total, the model was trained for 2.65 million steps.

**Important**: For more details, please see sections 5.2.1 and 5.2.2 of the [paper](https://arxiv.org/pdf/2205.05131v1.pdf).
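To make the R-denoiser described above concrete, here is a toy sketch (not from the card) of T5-style span corruption over a token list, masking roughly 15% of the tokens in short spans and replacing them with sentinel markers; the function and its defaults are illustrative, not the production implementation:

```python
import random

def span_corrupt(tokens, corruption_rate=0.15, mean_span=3):
    """Toy T5-style span corruption: replace short spans with sentinels.

    Returns (inputs, targets) in the usual sentinel format. Illustrative
    sketch of the R-denoiser idea only.
    """
    n_to_mask = max(1, int(len(tokens) * corruption_rate))  # masking budget ~15%
    inputs, targets, i, sentinel = [], [], 0, 0
    while i < len(tokens):
        if n_to_mask > 0 and random.random() < corruption_rate:
            # draw a short span (2-4 tokens here, mean ~3), capped by budget and length
            span = min(random.randint(2, 2 * mean_span - 2), n_to_mask, len(tokens) - i)
            inputs.append(f"<extra_id_{sentinel}>")
            targets.append(f"<extra_id_{sentinel}>")
            targets.extend(tokens[i:i + span])
            sentinel += 1
            n_to_mask -= span
            i += span
        else:
            inputs.append(tokens[i])
            i += 1
    return inputs, targets

random.seed(0)
print(span_corrupt("the quick brown fox jumps over the lazy dog".split()))
```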
# Contribution This model was originally contributed by [Yi Tay](https://www.yitay.net/?author=636616684c5e64780328eece), and added to the Hugging Face ecosystem by [Younes Belkada](https://huggingface.co/ybelkada) & [Arthur Zucker](https://huggingface.co/ArthurZ). # Citation If you want to cite this work, please consider citing the [blogpost](https://www.yitay.net/blog/flan-ul2-20b) announcing the release of `Flan-UL2`.
deepseek-ai/DeepSeek-Coder-V2-Instruct
deepseek-ai
"2024-08-21T06:42:50Z"
96,782
471
transformers
[ "transformers", "safetensors", "deepseek_v2", "text-generation", "conversational", "custom_code", "arxiv:2401.06066", "base_model:deepseek-ai/DeepSeek-Coder-V2-Base", "base_model:finetune:deepseek-ai/DeepSeek-Coder-V2-Base", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-06-14T03:46:22Z"
--- license: other license_name: deepseek-license license_link: LICENSE base_model: deepseek-ai/DeepSeek-Coder-V2-Base ---

<!-- markdownlint-disable first-line-h1 -->
<!-- markdownlint-disable html -->
<!-- markdownlint-disable no-duplicate-header -->

<div align="center"> <img src="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true" width="60%" alt="DeepSeek-V2" /> </div>
<hr>
<div align="center" style="line-height: 1;"> <a href="https://www.deepseek.com/" target="_blank" style="margin: 2px;"> <img alt="Homepage" src="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true" style="display: inline-block; vertical-align: middle;"/> </a> <a href="https://chat.deepseek.com/" target="_blank" style="margin: 2px;"> <img alt="Chat" src="https://img.shields.io/badge/🤖%20Chat-DeepSeek%20V2-536af5?color=536af5&logoColor=white" style="display: inline-block; vertical-align: middle;"/> </a> <a href="https://huggingface.co/deepseek-ai" target="_blank" style="margin: 2px;"> <img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white" style="display: inline-block; vertical-align: middle;"/> </a> </div>
<div align="center" style="line-height: 1;"> <a href="https://discord.gg/Tc7c45Zzu5" target="_blank" style="margin: 2px;"> <img alt="Discord" src="https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da" style="display: inline-block; vertical-align: middle;"/> </a> <a href="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true" target="_blank" style="margin: 2px;"> <img alt="Wechat" src="https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white" style="display: inline-block; vertical-align: middle;"/> </a> <a href="https://twitter.com/deepseek_ai" target="_blank" style="margin: 2px;"> <img alt="Twitter Follow" src="https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white" style="display: inline-block; vertical-align: middle;"/> </a> </div>
<div align="center" style="line-height: 1;"> <a href="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-CODE" style="margin: 2px;"> <img alt="Code License" src="https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53" style="display: inline-block; vertical-align: middle;"/> </a> <a href="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL" style="margin: 2px;"> <img alt="Model License" src="https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53" style="display: inline-block; vertical-align: middle;"/> </a> </div>

<p align="center"> <a href="#4-api-platform">API Platform</a> | <a href="#5-how-to-run-locally">How to Use</a> | <a href="#6-license">License</a> | </p>

<p align="center"> <a href="https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/paper.pdf"><b>Paper Link</b>👁️</a> </p>

# DeepSeek-Coder-V2: Breaking the Barrier of Closed-Source Models in Code Intelligence

## 1. Introduction

We present DeepSeek-Coder-V2, an open-source Mixture-of-Experts (MoE) code language model that achieves performance comparable to GPT4-Turbo in code-specific tasks. Specifically, DeepSeek-Coder-V2 is further pre-trained from an intermediate checkpoint of DeepSeek-V2 with an additional 6 trillion tokens.
Through this continued pre-training, DeepSeek-Coder-V2 substantially enhances the coding and mathematical reasoning capabilities of DeepSeek-V2, while maintaining comparable performance in general language tasks. Compared to DeepSeek-Coder-33B, DeepSeek-Coder-V2 demonstrates significant advancements in various aspects of code-related tasks, as well as reasoning and general capabilities. Additionally, DeepSeek-Coder-V2 expands its support for programming languages from 86 to 338, while extending the context length from 16K to 128K.

<p align="center"> <img width="100%" src="https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/figures/performance.png?raw=true"> </p>

In standard benchmark evaluations, DeepSeek-Coder-V2 achieves superior performance compared to closed-source models such as GPT4-Turbo, Claude 3 Opus, and Gemini 1.5 Pro in coding and math benchmarks. The list of supported programming languages can be found [here](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/supported_langs.txt).

## 2. Model Downloads

We release DeepSeek-Coder-V2 to the public with 16B and 236B total parameters, based on the [DeepSeekMoE](https://arxiv.org/pdf/2401.06066) framework, with active parameters of only 2.4B and 21B respectively, including both base and instruct models.

<div align="center">

| **Model** | **#Total Params** | **#Active Params** | **Context Length** | **Download** |
| :-----------------------------: | :---------------: | :----------------: | :----------------: | :----------------------------------------------------------: |
| DeepSeek-Coder-V2-Lite-Base | 16B | 2.4B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Base) |
| DeepSeek-Coder-V2-Lite-Instruct | 16B | 2.4B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct) |
| DeepSeek-Coder-V2-Base | 236B | 21B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Base) |
| DeepSeek-Coder-V2-Instruct | 236B | 21B | 128k | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct) |

</div>

## 3. Chat Website

You can chat with DeepSeek-Coder-V2 on DeepSeek's official website: [coder.deepseek.com](https://coder.deepseek.com/sign_in)

## 4. API Platform

We also provide an OpenAI-compatible API at the DeepSeek Platform: [platform.deepseek.com](https://platform.deepseek.com/), where you can pay as you go at an unbeatable price.

<p align="center"> <img width="40%" src="https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/figures/model_price.jpg?raw=true"> </p>

## 5. How to run locally

**Here, we provide some examples of how to use the DeepSeek-Coder-V2-Lite model. If you want to utilize DeepSeek-Coder-V2 in BF16 format for inference, 80GB*8 GPUs are required.**

### Inference with Huggingface's Transformers

You can directly employ [Huggingface's Transformers](https://github.com/huggingface/transformers) for model inference.
#### Code Completion

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
input_text = "#write a quick sort algorithm"
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

#### Code Insertion

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
input_text = """<|fim▁begin|>def quick_sort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[0]
    left = []
    right = []
<|fim▁hole|>
        if arr[i] < pivot:
            left.append(arr[i])
        else:
            right.append(arr[i])
    return quick_sort(left) + [pivot] + quick_sort(right)<|fim▁end|>"""
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True)[len(input_text):])
```

#### Chat Completion

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
messages = [
    {'role': 'user', 'content': "write a quick sort algorithm in python."}
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
# tokenizer.eos_token_id is the id of the <|end▁of▁sentence|> token
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, top_k=50, top_p=0.95, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True))
```

The complete chat template can be found within `tokenizer_config.json` located in the huggingface model repository. An example of the chat template is as below:

```bash
<|begin▁of▁sentence|>User: {user_message_1}

Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}

Assistant:
```

You can also add an optional system message:

```bash
<|begin▁of▁sentence|>{system_message}

User: {user_message_1}

Assistant: {assistant_message_1}<|end▁of▁sentence|>User: {user_message_2}

Assistant:
```

### Inference with vLLM (recommended)

To utilize [vLLM](https://github.com/vllm-project/vllm) for model inference, please merge this Pull Request into your vLLM codebase: https://github.com/vllm-project/vllm/pull/4650.
```python
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

max_model_len, tp_size = 8192, 1
model_name = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
llm = LLM(model=model_name, tensor_parallel_size=tp_size, max_model_len=max_model_len, trust_remote_code=True, enforce_eager=True)
sampling_params = SamplingParams(temperature=0.3, max_tokens=256, stop_token_ids=[tokenizer.eos_token_id])

messages_list = [
    [{"role": "user", "content": "Who are you?"}],
    [{"role": "user", "content": "write a quick sort algorithm in python."}],
    [{"role": "user", "content": "Write a piece of quicksort code in C++."}],
]

prompt_token_ids = [tokenizer.apply_chat_template(messages, add_generation_prompt=True) for messages in messages_list]

outputs = llm.generate(prompt_token_ids=prompt_token_ids, sampling_params=sampling_params)

generated_text = [output.outputs[0].text for output in outputs]
print(generated_text)
```

## 6. License

This code repository is licensed under [the MIT License](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/LICENSE-CODE). The use of the DeepSeek-Coder-V2 Base/Instruct models is subject to [the Model License](https://github.com/deepseek-ai/DeepSeek-Coder-V2/blob/main/LICENSE-MODEL). The DeepSeek-Coder-V2 series (including Base and Instruct) supports commercial use.

## 7. Contact

If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).
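As a quick sanity check of the chat template shown in section 5 (a sketch, assuming the template ships in `tokenizer_config.json` as the card states; the message content is illustrative):

```python
from transformers import AutoTokenizer

# Render the chat template as a string to inspect the exact prompt format
# described in the card (the full model weights are not needed for this).
tokenizer = AutoTokenizer.from_pretrained(
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", trust_remote_code=True
)
messages = [{"role": "user", "content": "write a quick sort algorithm in python."}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# Expected shape per the card: <|begin▁of▁sentence|>User: ... Assistant:
```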
prajjwal1/bert-mini
prajjwal1
"2021-10-27T18:27:38Z"
96,451
20
transformers
[ "transformers", "pytorch", "BERT", "MNLI", "NLI", "transformer", "pre-training", "en", "arxiv:1908.08962", "arxiv:2110.01518", "license:mit", "endpoints_compatible", "region:us" ]
null
"2022-03-02T23:29:05Z"
--- language: - en license: - mit tags: - BERT - MNLI - NLI - transformer - pre-training ---

The following model is a PyTorch pre-trained model obtained by converting the TensorFlow checkpoint found in the [official Google BERT repository](https://github.com/google-research/bert).

This is one of the smaller pre-trained BERT variants, together with [bert-small](https://huggingface.co/prajjwal1/bert-small) and [bert-medium](https://huggingface.co/prajjwal1/bert-medium). They were introduced in the study `Well-Read Students Learn Better: On the Importance of Pre-training Compact Models` ([arxiv](https://arxiv.org/abs/1908.08962)), and ported to HF for the study `Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics` ([arXiv](https://arxiv.org/abs/2110.01518)). These models are intended to be fine-tuned on a downstream task.

If you use the model, please consider citing both papers:

```
@misc{bhargava2021generalization,
      title={Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics},
      author={Prajjwal Bhargava and Aleksandr Drozd and Anna Rogers},
      year={2021},
      eprint={2110.01518},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}

@article{DBLP:journals/corr/abs-1908-08962,
  author = {Iulia Turc and Ming{-}Wei Chang and Kenton Lee and Kristina Toutanova},
  title = {Well-Read Students Learn Better: The Impact of Student Initialization on Knowledge Distillation},
  journal = {CoRR},
  volume = {abs/1908.08962},
  year = {2019},
  url = {http://arxiv.org/abs/1908.08962},
  eprinttype = {arXiv},
  eprint = {1908.08962},
  timestamp = {Thu, 29 Aug 2019 16:32:34 +0200},
  biburl = {https://dblp.org/rec/journals/corr/abs-1908-08962.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

Config of this model: `prajjwal1/bert-mini` (L=4, H=256) [Model Link](https://huggingface.co/prajjwal1/bert-mini)

Other models to check out:
- `prajjwal1/bert-tiny` (L=2, H=128) [Model Link](https://huggingface.co/prajjwal1/bert-tiny)
- `prajjwal1/bert-small` (L=4, H=512) [Model Link](https://huggingface.co/prajjwal1/bert-small)
- `prajjwal1/bert-medium` (L=8, H=512) [Model Link](https://huggingface.co/prajjwal1/bert-medium)

The original implementation and more info can be found in [this Github repository](https://github.com/prajjwal1/generalize_lm_nli).

Twitter: [@prajjwal_1](https://twitter.com/prajjwal_1)
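Since these checkpoints are meant to be fine-tuned, here is a minimal setup sketch (not from the card); the choice of 3 labels assumes an MNLI-style NLI task, matching the card's tags, and the example sentence pair is illustrative:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load bert-mini with a fresh classification head; 3 labels assumes
# MNLI-style entailment/neutral/contradiction (per the card's MNLI/NLI tags).
tokenizer = AutoTokenizer.from_pretrained("prajjwal1/bert-mini")
model = AutoModelForSequenceClassification.from_pretrained("prajjwal1/bert-mini", num_labels=3)

batch = tokenizer(
    ["A man is playing guitar."], ["A person is making music."],
    padding=True, truncation=True, return_tensors="pt",
)
print(model(**batch).logits.shape)  # (1, 3) -- untrained head, wiring check only
```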
kandinsky-community/kandinsky-2-1
kandinsky-community
"2023-10-09T11:33:20Z"
96,392
36
diffusers
[ "diffusers", "safetensors", "text-to-image", "kandinsky", "license:apache-2.0", "diffusers:KandinskyPipeline", "region:us" ]
text-to-image
"2023-05-24T09:52:07Z"
--- license: apache-2.0 prior: - kandinsky-community/kandinsky-2-1-prior tags: - text-to-image - kandinsky inference: false ---

# Kandinsky 2.1

Kandinsky 2.1 inherits best practices from DALL-E 2 and latent diffusion while introducing some new ideas. It uses the CLIP model as a text and image encoder, and a diffusion image prior (mapping) between the latent spaces of the CLIP modalities. This approach increases the visual performance of the model and unveils new horizons in blending images and text-guided image manipulation.

The Kandinsky model is created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey) and [Denis Dimitrov](https://github.com/denndimitrov)

## Usage

Kandinsky 2.1 is available in diffusers!

```bash
pip install diffusers transformers accelerate
```

### Text to image

```python
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
negative_prompt = "low quality, bad quality"

image = pipe(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_scale=1.0, height=768, width=768).images[0]
image.save("cheeseburger_monster.png")
```

![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/cheeseburger.png)

### Text Guided Image-to-Image Generation

```python
from diffusers import AutoPipelineForImage2Image
import torch
import requests
from io import BytesIO
from PIL import Image

pipe = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

prompt = "A fantasy landscape, Cinematic lighting"
negative_prompt = "low quality, bad quality"

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
response = requests.get(url)
original_image = Image.open(BytesIO(response.content)).convert("RGB")
original_image.thumbnail((768, 768))

image = pipe(prompt=prompt, image=original_image, strength=0.3).images[0]
image.save("fantasy_land.png")
```

![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/img2img_fantasyland.png)

### Interpolate

```python
from diffusers import KandinskyPriorPipeline, KandinskyPipeline
from diffusers.utils import load_image
import torch

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
)
pipe_prior.to("cuda")

img1 = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinsky/cat.png"
)
img2 = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinsky/starry_night.jpeg"
)

# add all the conditions we want to interpolate; they can be either text or image
images_texts = ["a cat", img1, img2]

# specify the weights for each condition in images_texts
weights = [0.3, 0.3, 0.4]

# We can leave the prompt empty
prompt = ""
prior_out = pipe_prior.interpolate(images_texts, weights)

pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
pipe.to("cuda") image = pipe(prompt, **prior_out, height=768, width=768).images[0] image.save("starry_cat.png") ``` ![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/starry_cat.png) ## Model Architecture ### Overview Kandinsky 2.1 is a text-conditional diffusion model based on unCLIP and latent diffusion, composed of a transformer-based image prior model, a unet diffusion model, and a decoder. The model architectures are illustrated in the figure below - the chart on the left describes the process to train the image prior model, the figure in the center is the text-to-image generation process, and the figure on the right is image interpolation. <p float="left"> <img src="https://raw.githubusercontent.com/ai-forever/Kandinsky-2/main/content/kandinsky21.png"/> </p> Specifically, the image prior model was trained on CLIP text and image embeddings generated with a pre-trained [mCLIP model](https://huggingface.co/M-CLIP/XLM-Roberta-Large-Vit-L-14). The trained image prior model is then used to generate mCLIP image embeddings for input text prompts. Both the input text prompts and its mCLIP image embeddings are used in the diffusion process. A [MoVQGAN](https://openreview.net/forum?id=Qb-AoSw4Jnm) model acts as the final block of the model, which decodes the latent representation into an actual image. ### Details The image prior training of the model was performed on the [LAION Improved Aesthetics dataset](https://huggingface.co/datasets/bhargavsdesai/laion_improved_aesthetics_6.5plus_with_images), and then fine-tuning was performed on the [LAION HighRes data](https://huggingface.co/datasets/laion/laion-high-resolution). The main Text2Image diffusion model was trained on the basis of 170M text-image pairs from the [LAION HighRes dataset](https://huggingface.co/datasets/laion/laion-high-resolution) (an important condition was the presence of images with a resolution of at least 768x768). The use of 170M pairs is due to the fact that we kept the UNet diffusion block from Kandinsky 2.0, which allowed us not to train it from scratch. Further, at the stage of fine-tuning, a dataset of 2M very high-quality high-resolution images with descriptions (COYO, anime, landmarks_russia, and a number of others) was used separately collected from open sources. ### Evaluation We quantitatively measure the performance of Kandinsky 2.1 on the COCO_30k dataset, in zero-shot mode. The table below presents FID. FID metric values ​​for generative models on COCO_30k | | FID (30k)| |:------|----:| | eDiff-I (2022) | 6.95 | | Image (2022) | 7.27 | | Kandinsky 2.1 (2023) | 8.21| | Stable Diffusion 2.1 (2022) | 8.59 | | GigaGAN, 512x512 (2023) | 9.09 | | DALL-E 2 (2022) | 10.39 | | GLIDE (2022) | 12.24 | | Kandinsky 1.0 (2022) | 15.40 | | DALL-E (2021) | 17.89 | | Kandinsky 2.0 (2022) | 20.00 | | GLIGEN (2022) | 21.04 | For more information, please refer to the upcoming technical report. ## BibTex If you find this repository useful in your research, please cite: ``` @misc{kandinsky 2.1, title = {kandinsky 2.1}, author = {Arseniy Shakhmatov, Anton Razzhigaev, Aleksandr Nikolich, Vladimir Arkhipkin, Igor Pavlov, Andrey Kuznetsov, Denis Dimitrov}, year = {2023}, howpublished = {}, } ```
Rostlab/prot_t5_xl_half_uniref50-enc
Rostlab
"2023-01-31T21:04:38Z"
96,308
14
transformers
[ "transformers", "pytorch", "t5", "protein language model", "dataset:UniRef50", "text-generation-inference", "endpoints_compatible", "region:us" ]
null
"2022-05-20T09:58:28Z"
--- tags: - protein language model datasets: - UniRef50 ---

# Encoder only ProtT5-XL-UniRef50, half-precision model

An encoder-only, half-precision version of the [ProtT5-XL-UniRef50](https://huggingface.co/Rostlab/prot_t5_xl_uniref50) model. The original model and its pretraining were introduced in [this paper](https://doi.org/10.1101/2020.07.12.199554) and first released in [this repository](https://github.com/agemagician/ProtTrans). This model is trained on uppercase amino acids: it only works with capital-letter amino acids.

## Model description

ProtT5-XL-UniRef50 is based on the `t5-3b` model and was pretrained on a large corpus of protein sequences in a self-supervised fashion. This means it was pretrained on the raw protein sequences only, with no humans labelling them in any way (which is why it can use lots of publicly available data), with an automatic process to generate inputs and labels from those protein sequences.

One important difference between this T5 model and the original T5 version is the denoising objective. The original T5-3B model was pretrained using a span denoising objective, while this model was pretrained with a BART-like MLM denoising objective. The masking probability is consistent with the original T5 training: 15% of the amino acids in the input are randomly masked.

This model only contains the encoder portion of the original ProtT5-XL-UniRef50 model, using half precision (float16). As such, this model can efficiently be used to create protein/amino-acid representations. When used for training downstream networks or for feature extraction, these embeddings produced the same performance as those of the full-precision model (established empirically by comparing on several downstream tasks).

## Intended uses & limitations

This version of the original ProtT5-XL-UniRef50 is mostly meant for conveniently creating amino-acid or protein embeddings with a low GPU-memory footprint, without any measurable performance decrease in our experiments. This model is fully usable on 8 GB of video RAM.
### How to use

An extensive, interactive example of how to use this model for common tasks can be found [on Google Colab](https://colab.research.google.com/drive/1TUj-ayG3WO52n5N50S7KH9vtt6zRkdmj?usp=sharing#scrollTo=ET2v51slC5ui).

Here is how to use this model to extract the features of a given protein sequence in PyTorch (the imports and model loading, absent from the original snippet, follow the `T5EncoderModel` usage stated in the NOTE below):

```python
import re
import torch
from transformers import T5Tokenizer, T5EncoderModel

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# load the tokenizer and the encoder-only model in half precision
tokenizer = T5Tokenizer.from_pretrained("Rostlab/prot_t5_xl_half_uniref50-enc", do_lower_case=False)
model = T5EncoderModel.from_pretrained("Rostlab/prot_t5_xl_half_uniref50-enc", torch_dtype=torch.float16).to(device)

sequence_examples = ["PRTEINO", "SEQWENCE"]
# this will replace all rare/ambiguous amino acids by X and introduce white-space between all amino acids
sequence_examples = [" ".join(list(re.sub(r"[UZOB]", "X", sequence))) for sequence in sequence_examples]

# tokenize sequences and pad up to the longest sequence in the batch
ids = tokenizer.batch_encode_plus(sequence_examples, add_special_tokens=True, padding="longest")
input_ids = torch.tensor(ids['input_ids']).to(device)
attention_mask = torch.tensor(ids['attention_mask']).to(device)

# generate embeddings
with torch.no_grad():
    embedding_repr = model(input_ids=input_ids, attention_mask=attention_mask)

# extract embeddings for the first ([0,:]) sequence in the batch while removing padded & special tokens ([0,:7])
emb_0 = embedding_repr.last_hidden_state[0,:7]  # shape (7 x 1024)
print(f"Shape of per-residue embedding of first sequences: {emb_0.shape}")
# do the same for the second ([1,:]) sequence in the batch while taking into account different sequence lengths ([1,:8])
emb_1 = embedding_repr.last_hidden_state[1,:8]  # shape (8 x 1024)

# if you want to derive a single representation (per-protein embedding) for the whole protein
emb_0_per_protein = emb_0.mean(dim=0)  # shape (1024)
print(f"Shape of per-protein embedding of first sequences: {emb_0_per_protein.shape}")
```

**NOTE**: Please make sure to explicitly set the model to `float16` (`T5EncoderModel.from_pretrained('Rostlab/prot_t5_xl_half_uniref50-enc', torch_dtype=torch.float16)`); otherwise, the generated embeddings will be full precision.

**NOTE**: Currently (06/2022) half-precision models cannot be used on CPU. If you want to use the encoder-only version on CPU, you need to cast it to its full-precision version (`model=model.float()`).

### BibTeX entry and citation info

```bibtex
@article {Elnaggar2020.07.12.199554,
	author = {Elnaggar, Ahmed and Heinzinger, Michael and Dallago, Christian and Rehawi, Ghalia and Wang, Yu and Jones, Llion and Gibbs, Tom and Feher, Tamas and Angerer, Christoph and Steinegger, Martin and BHOWMIK, DEBSINDHU and Rost, Burkhard},
	title = {ProtTrans: Towards Cracking the Language of Life{\textquoteright}s Code Through Self-Supervised Deep Learning and High Performance Computing},
	elocation-id = {2020.07.12.199554},
	year = {2020},
	doi = {10.1101/2020.07.12.199554},
	publisher = {Cold Spring Harbor Laboratory},
	abstract = {Computational biology and bioinformatics provide vast data gold-mines from protein sequences, ideal for Language Models (LMs) taken from Natural Language Processing (NLP). These LMs reach for new prediction frontiers at low inference costs. Here, we trained two auto-regressive language models (Transformer-XL, XLNet) and two auto-encoder models (Bert, Albert) on data from UniRef and BFD containing up to 393 billion amino acids (words) from 2.1 billion protein sequences (22- and 112 times the entire English Wikipedia). The LMs were trained on the Summit supercomputer at Oak Ridge National Laboratory (ORNL), using 936 nodes (total 5616 GPUs) and one TPU Pod (V3-512 or V3-1024).
We validated the advantage of up-scaling LMs to larger models supported by bigger data by predicting secondary structure (3-states: Q3=76-84, 8 states: Q8=65-73), sub-cellular localization for 10 cellular compartments (Q10=74) and whether a protein is membrane-bound or water-soluble (Q2=89). Dimensionality reduction revealed that the LM-embeddings from unlabeled data (only protein sequences) captured important biophysical properties governing protein shape. This implied learning some of the grammar of the language of life realized in protein sequences. The successful up-scaling of protein LMs through HPC to larger data sets slightly reduced the gap between models trained on evolutionary information and LMs. Availability: ProtTrans: https://github.com/agemagician/ProtTrans. Competing Interest Statement: The authors have declared no competing interest.},
	URL = {https://www.biorxiv.org/content/early/2020/07/21/2020.07.12.199554},
	eprint = {https://www.biorxiv.org/content/early/2020/07/21/2020.07.12.199554.full.pdf},
	journal = {bioRxiv}
}
```
microsoft/trocr-base-printed
microsoft
"2024-05-27T20:11:53Z"
96,108
145
transformers
[ "transformers", "pytorch", "safetensors", "vision-encoder-decoder", "trocr", "image-to-text", "arxiv:2109.10282", "endpoints_compatible", "region:us" ]
image-to-text
"2022-03-02T23:29:05Z"
--- tags: - trocr - image-to-text widget: - src: https://layoutlm.blob.core.windows.net/trocr/dataset/SROIE2019Task2Crop/train/X00016469612_1.jpg example_title: Printed 1 - src: https://layoutlm.blob.core.windows.net/trocr/dataset/SROIE2019Task2Crop/train/X51005255805_7.jpg example_title: Printed 2 - src: https://layoutlm.blob.core.windows.net/trocr/dataset/SROIE2019Task2Crop/train/X51005745214_6.jpg example_title: Printed 3 --- # TrOCR (base-sized model, fine-tuned on SROIE) TrOCR model fine-tuned on the [SROIE dataset](https://rrc.cvc.uab.es/?ch=13). It was introduced in the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Li et al. and first released in [this repository](https://github.com/microsoft/unilm/tree/master/trocr). Disclaimer: The team releasing TrOCR did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The TrOCR model is an encoder-decoder model, consisting of an image Transformer as encoder, and a text Transformer as decoder. The image encoder was initialized from the weights of BEiT, while the text decoder was initialized from the weights of RoBERTa. Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder. Next, the Transformer text decoder autoregressively generates tokens. ## Intended uses & limitations You can use the raw model for optical character recognition (OCR) on single text-line images. See the [model hub](https://huggingface.co/models?search=microsoft/trocr) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model in PyTorch: ```python from transformers import TrOCRProcessor, VisionEncoderDecoderModel from PIL import Image import requests # load image from the IAM database (actually this model is meant to be used on printed text) url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-printed') model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-printed') pixel_values = processor(images=image, return_tensors="pt").pixel_values generated_ids = model.generate(pixel_values) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` ### BibTeX entry and citation info ```bibtex @misc{li2021trocr, title={TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models}, author={Minghao Li and Tengchao Lv and Lei Cui and Yijuan Lu and Dinei Florencio and Cha Zhang and Zhoujun Li and Furu Wei}, year={2021}, eprint={2109.10282}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
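As a small complement to the snippet in the card (not from the original), the same model can be run on a local image; `"receipt_line.png"` is a hypothetical crop containing a single line of printed text, matching the single text-line limitation described above:

```python
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed")

# "receipt_line.png" is a hypothetical local crop of one line of printed text
image = Image.open("receipt_line.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```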
distil-whisper/distil-large-v2
distil-whisper
"2024-03-21T19:32:46Z"
95,630
503
transformers
[ "transformers", "pytorch", "jax", "tensorboard", "onnx", "safetensors", "whisper", "automatic-speech-recognition", "audio", "transformers.js", "en", "arxiv:2311.00430", "arxiv:2210.13352", "license:mit", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2023-10-24T15:48:32Z"
--- language: - en tags: - audio - automatic-speech-recognition - transformers.js widget: - example_title: LibriSpeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: LibriSpeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac pipeline_tag: automatic-speech-recognition license: mit library_name: transformers ---

# Distil-Whisper: distil-large-v2

Distil-Whisper was proposed in the paper [Robust Knowledge Distillation via Large-Scale Pseudo Labelling](https://arxiv.org/abs/2311.00430). It is a distilled version of the Whisper model that is **6 times faster**, 49% smaller, and performs **within 1% WER** on out-of-distribution evaluation sets. This is the repository for distil-large-v2, a distilled variant of [Whisper large-v2](https://huggingface.co/openai/whisper-large-v2).

| Model | Params / M | Rel. Latency ↑ | Short-Form WER ↓ | Long-Form WER ↓ |
|----------------------------------------------------------------------------|------------|----------------|------------------|-----------------|
| [large-v3](https://huggingface.co/openai/whisper-large-v3) | 1550 | 1.0 | **8.4** | 11.0 |
| [large-v2](https://huggingface.co/openai/whisper-large-v2) | 1550 | 1.0 | 9.1 | 11.7 |
| | | | | |
| [distil-large-v3](https://huggingface.co/distil-whisper/distil-large-v3) | 756 | 6.3 | 9.7 | **10.8** |
| [distil-large-v2](https://huggingface.co/distil-whisper/distil-large-v2) | 756 | 5.8 | 10.1 | 11.6 |
| [distil-medium.en](https://huggingface.co/distil-whisper/distil-medium.en) | 394 | **6.8** | 11.1 | 12.4 |
| [distil-small.en](https://huggingface.co/distil-whisper/distil-small.en) | **166** | 5.6 | 12.1 | 12.8 |

<div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><b>Update:</b> following the release of OpenAI's Whisper large-v3, an updated <a href="https://huggingface.co/distil-whisper/distil-large-v3"> distil-large-v3</a> model was published. This <a href="https://huggingface.co/distil-whisper/distil-large-v3"> distil-large-v3</a> model surpasses the performance of the distil-large-v2 model, with no architecture changes and better support for sequential long-form generation. Thus, it is recommended that the <a href="https://huggingface.co/distil-whisper/distil-large-v3"> distil-large-v3</a> model is used in place of the large-v2 model. </p> </div>

**Note:** Distil-Whisper is currently only available for English speech recognition. We are working with the community to distill Whisper on other languages. If you are interested in distilling Whisper in your language, check out the provided [training code](https://github.com/huggingface/distil-whisper/tree/main/training). We will update the [Distil-Whisper repository](https://github.com/huggingface/distil-whisper/) with multilingual checkpoints when ready!

## Usage

Distil-Whisper is supported in Hugging Face 🤗 Transformers from version 4.35 onwards. To run the model, first install the latest version of the Transformers library.
For this example, we'll also install 🤗 Datasets to load a toy audio dataset from the Hugging Face Hub: ```bash pip install --upgrade pip pip install --upgrade transformers accelerate datasets[audio] ``` ### Short-Form Transcription The model can be used with the [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) class to transcribe short-form audio files (< 30 seconds) as follows: ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline from datasets import load_dataset device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "distil-whisper/distil-large-v2" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, torch_dtype=torch_dtype, device=device, ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` To transcribe a local audio file, simply pass the path to your audio file when you call the pipeline: ```diff - result = pipe(sample) + result = pipe("audio.mp3") ``` ### Long-Form Transcription Distil-Whisper uses a chunked algorithm to transcribe long-form audio files (> 30 seconds). In practice, this chunked long-form algorithm is 9x faster than the sequential algorithm proposed by OpenAI in the Whisper paper (see Table 7 of the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430)). To enable chunking, pass the `chunk_length_s` parameter to the `pipeline`. For Distil-Whisper, a chunk length of 15 seconds is optimal. To activate batching, pass the argument `batch_size`: ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline from datasets import load_dataset device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "distil-whisper/distil-large-v2" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device, ) dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` <!--- **Tip:** The pipeline can also be used to transcribe an audio file from a remote URL, for example: ```python result = pipe("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav") ``` ---> ### Speculative Decoding Distil-Whisper can be used as an assistant model to Whisper for [speculative decoding](https://huggingface.co/blog/whisper-speculative-decoding). Speculative decoding mathematically ensures the exact same outputs as Whisper are obtained while being 2 times faster.
This makes it the perfect drop-in replacement for existing Whisper pipelines, since the same outputs are guaranteed. In the following code-snippet, we load the Distil-Whisper assistant model standalone, and then pass it to the main Whisper pipeline as the "assistant model" for generation: ```python from transformers import pipeline, AutoModelForCausalLM, AutoModelForSpeechSeq2Seq, AutoProcessor import torch from datasets import load_dataset device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 assistant_model_id = "distil-whisper/distil-large-v2" assistant_model = AutoModelForCausalLM.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) assistant_model.to(device) model_id = "openai/whisper-large-v2" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, generate_kwargs={"assistant_model": assistant_model}, torch_dtype=torch_dtype, device=device, ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` ## Additional Speed & Memory Improvements You can apply additional speed and memory improvements to Distil-Whisper, which we cover in the following sections. ### Flash Attention We recommend using [Flash-Attention 2](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#flashattention-2) if your GPU allows for it. To do so, you first need to install [Flash Attention](https://github.com/Dao-AILab/flash-attention): ``` pip install flash-attn --no-build-isolation ``` and then all you have to do is pass `use_flash_attention_2=True` to `from_pretrained`: ```diff - model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True) + model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, use_flash_attention_2=True) ``` ### Torch Scaled Dot-Product Attention (SDPA) If your GPU does not support Flash Attention, we recommend making use of [BetterTransformers](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#bettertransformer).
To do so, you first need to install optimum: ``` pip install --upgrade optimum ``` And then convert your model to a "BetterTransformer" model before using it: ```diff model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True) + model = model.to_bettertransformer() ``` ### Running Distil-Whisper in `openai-whisper` To use the model in the original Whisper format, first ensure you have the [`openai-whisper`](https://pypi.org/project/openai-whisper/) package installed: ```bash pip install --upgrade openai-whisper ``` The following code-snippet demonstrates how to transcribe a sample file from the LibriSpeech dataset loaded using 🤗 Datasets: ```python import torch from datasets import load_dataset from huggingface_hub import hf_hub_download from whisper import load_model, transcribe distil_large_v2 = hf_hub_download(repo_id="distil-whisper/distil-large-v2", filename="original-model.bin") model = load_model(distil_large_v2) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"]["array"] sample = torch.from_numpy(sample).float() pred_out = transcribe(model, audio=sample) print(pred_out["text"]) ``` To transcribe a local audio file, simply pass the path to the audio file as the `audio` argument to transcribe: ```python pred_out = transcribe(model, audio="audio.mp3") ``` ### Whisper.cpp Distil-Whisper can be run from the [Whisper.cpp](https://github.com/ggerganov/whisper.cpp) repository with the original sequential long-form transcription algorithm. In a [provisional benchmark](https://github.com/ggerganov/whisper.cpp/pull/1424#issuecomment-1793513399) on Mac M1, `distil-large-v2` is 2x faster than `large-v2`, while performing to within 0.1% WER over long-form audio. Note that future releases of Distil-Whisper will target faster CPU inference more! By distilling smaller encoders, we aim to achieve similar speed-ups to what we obtain on GPU. Steps for getting started: 1. Clone the Whisper.cpp repository: ``` git clone https://github.com/ggerganov/whisper.cpp.git cd whisper.cpp ``` 2. Download the ggml weights for `distil-large-v2` from the Hugging Face Hub: ```bash python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='distil-whisper/distil-large-v2', filename='ggml-large-32-2.en.bin', local_dir='./models')" ``` Note that if you do not have the `huggingface_hub` package installed, you can also download the weights with `wget`: ```bash wget https://huggingface.co/distil-whisper/distil-large-v2/resolve/main/ggml-large-32-2.en.bin -P ./models ``` 3. Run inference using the provided sample audio: ```bash make -j && ./main -m models/ggml-large-32-2.en.bin -f samples/jfk.wav ``` ### Transformers.js ```js import { pipeline } from '@xenova/transformers'; let transcriber = await pipeline('automatic-speech-recognition', 'distil-whisper/distil-large-v2'); let url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; let output = await transcriber(url); // { text: " And so, my fellow Americans, ask not what your country can do for you. Ask what you can do for your country." } ``` See the [docs](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.AutomaticSpeechRecognitionPipeline) for more information.
*Note:* Due to the large model size, we recommend running this model server-side with [Node.js](https://huggingface.co/docs/transformers.js/guides/node-audio-processing) (instead of in-browser). ### Candle Through an integration with Hugging Face [Candle](https://github.com/huggingface/candle/tree/main) 🕯️, Distil-Whisper is now available in the Rust library 🦀 Benefit from: * Optimised CPU backend with optional MKL support for x86 and Accelerate for Macs * CUDA backend for efficiently running on GPUs, multiple GPU distribution via NCCL * WASM support: run Distil-Whisper in a browser Steps for getting started: 1. Install [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as explained [here](https://huggingface.github.io/candle/guide/installation.html) 2. Clone the `candle` repository locally: ``` git clone https://github.com/huggingface/candle.git ``` 3. Enter the example directory for [Whisper](https://github.com/huggingface/candle/tree/main/candle-examples/examples/whisper): ``` cd candle/candle-examples/examples/whisper ``` 4. Run an example: ``` cargo run --example whisper --release -- --model distil-large-v2 ``` 5. To specify your own audio file, add the `--input` flag: ``` cargo run --example whisper --release -- --model distil-large-v2 --input audio.wav ``` ### 8bit & 4bit Quantization Coming soon ... ## Model Details Distil-Whisper inherits the encoder-decoder architecture from Whisper. The encoder maps a sequence of speech vector inputs to a sequence of hidden-state vectors. The decoder auto-regressively predicts text tokens, conditional on all previous tokens and the encoder hidden-states. Consequently, the encoder is only run forward once, whereas the decoder is run as many times as the number of tokens generated. In practice, this means the decoder accounts for over 90% of total inference time. Thus, to optimise for latency, the focus should be on minimising the inference time of the decoder. To distill the Whisper model, we reduce the number of decoder layers while keeping the encoder fixed. The encoder (shown in green) is entirely copied from the teacher to the student and frozen during training. The student's decoder consists of only two decoder layers, which are initialised from the first and last decoder layer of the teacher (shown in red). All other decoder layers of the teacher are discarded. The model is then trained on a weighted sum of the KL divergence and pseudo-label loss terms. <p align="center"> <img src="https://huggingface.co/datasets/distil-whisper/figures/resolve/main/architecture.png?raw=true" width="600"/> </p> ## Evaluation The following code-snippet demonstrates how to evaluate the Distil-Whisper model on the LibriSpeech validation.clean dataset with [streaming mode](https://huggingface.co/blog/audio-datasets#streaming-mode-the-silver-bullet), meaning no audio data has to be downloaded to your local device.
First, we need to install the required packages, including 🤗 Datasets to stream and load the audio data, and 🤗 Evaluate to perform the WER calculation: ```bash pip install --upgrade pip pip install --upgrade transformers datasets[audio] evaluate jiwer ``` Evaluation can then be run end-to-end with the following example: ```python from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from datasets import load_dataset from evaluate import load import torch from tqdm import tqdm # define our torch configuration device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "distil-whisper/distil-large-v2" # load the model + processor model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, use_safetensors=True, low_cpu_mem_usage=True) model = model.to(device) processor = AutoProcessor.from_pretrained(model_id) # load the dataset with streaming mode dataset = load_dataset("librispeech_asr", "clean", split="validation", streaming=True) # define the evaluation metric wer_metric = load("wer") normalizer = EnglishTextNormalizer(processor.tokenizer.english_spelling_normalizer) def inference(batch): # 1. Pre-process the audio data to log-mel spectrogram inputs audio = [sample["array"] for sample in batch["audio"]] input_features = processor(audio, sampling_rate=batch["audio"][0]["sampling_rate"], return_tensors="pt").input_features input_features = input_features.to(device, dtype=torch_dtype) # 2. Auto-regressively generate the predicted token ids pred_ids = model.generate(input_features, max_new_tokens=128, language="en", task="transcribe") # 3. Decode the token ids to the final transcription batch["transcription"] = processor.batch_decode(pred_ids, skip_special_tokens=True) batch["reference"] = batch["text"] return batch dataset = dataset.map(function=inference, batched=True, batch_size=16) all_transcriptions = [] all_references = [] # iterate over the dataset and run inference for i, result in tqdm(enumerate(dataset), desc="Evaluating..."): all_transcriptions.append(result["transcription"]) all_references.append(result["reference"]) # normalize predictions and references all_transcriptions = [normalizer(transcription) for transcription in all_transcriptions] all_references = [normalizer(reference) for reference in all_references] # compute the WER metric wer = 100 * wer_metric.compute(predictions=all_transcriptions, references=all_references) print(wer) ``` **Print Output:** ``` 2.983685535968466 ``` ## Intended Use Distil-Whisper is intended to be a drop-in replacement for Whisper on English speech recognition. In particular, it achieves comparable WER results over out-of-distribution test data, while being 6x faster over both short and long-form audio. 
## Data Distil-Whisper is trained on 22,000 hours of audio data from 9 open-source, permissively licensed speech datasets on the Hugging Face Hub: | Dataset | Size / h | Speakers | Domain | Licence | |-----------------------------------------------------------------------------------------|----------|----------|-----------------------------|-----------------| | [People's Speech](https://huggingface.co/datasets/MLCommons/peoples_speech) | 12,000 | unknown | Internet Archive | CC-BY-SA-4.0 | | [Common Voice 13](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) | 3,000 | unknown | Narrated Wikipedia | CC0-1.0 | | [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | 2,500 | unknown | Audiobook, podcast, YouTube | apache-2.0 | | Fisher | 1,960 | 11,900 | Telephone conversations | LDC | | [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) | 960 | 2,480 | Audiobooks | CC-BY-4.0 | | [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | 540 | 1,310 | European Parliament | CC0 | | [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | 450 | 2,030 | TED talks | CC-BY-NC-ND 3.0 | | SwitchBoard | 260 | 540 | Telephone conversations | LDC | | [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | 100 | unknown | Meetings | CC-BY-4.0 | |||||| | **Total** | 21,770 | 18,260+ | | | The combined dataset spans 10 distinct domains and over 50k speakers. The diversity of this dataset is crucial to ensuring the distilled model is robust to audio distributions and noise. The audio data is then pseudo-labelled using the Whisper large-v2 model: we use Whisper to generate predictions for all the audio in our training set and use these as the target labels during training. Using pseudo-labels ensures that the transcriptions are consistently formatted across datasets and provides sequence-level distillation signal during training. ## WER Filter The Whisper pseudo-label predictions are subject to mis-transcriptions and hallucinations. To ensure we only train on accurate pseudo-labels, we employ a simple WER heuristic during training. First, we normalise the Whisper pseudo-labels and the ground truth labels provided by each dataset. We then compute the WER between these labels. If the WER exceeds a specified threshold, we discard the training example. Otherwise, we keep it for training. Section 9.2 of the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430) demonstrates the effectiveness of this filter for improving downstream performance of the distilled model. We also partially attribute Distil-Whisper's robustness to hallucinations to this filter. ## Training The model was trained for 80,000 optimisation steps (or eight epochs). The Tensorboard training logs can be found under: https://huggingface.co/distil-whisper/distil-large-v2/tensorboard?params=scalars#frame ## Results The distilled model performs to within 1% WER of Whisper on out-of-distribution (OOD) short-form audio, and outperforms Whisper by 0.1% on OOD long-form audio. This performance gain is attributed to lower hallucinations. For a detailed per-dataset breakdown of the evaluation results, refer to Tables 16 and 17 of the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430) Distil-Whisper is also evaluated on the [ESB benchmark](https://arxiv.org/abs/2210.13352) datasets as part of the [OpenASR leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard), where it performs to within 0.2% WER of Whisper. 
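To make the WER filter described above concrete, here is a minimal sketch of the heuristic (the 10% threshold is illustrative, not the exact value used in training):

```python
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer
from evaluate import load

wer_metric = load("wer")
normalizer = EnglishTextNormalizer({})  # empty spelling-normaliser mapping for this sketch

def keep_example(pseudo_label: str, ground_truth: str, threshold: float = 10.0) -> bool:
    """Keep a training example only if the WER between the normalised
    Whisper pseudo-label and the normalised ground truth is below the threshold (in %)."""
    pred = normalizer(pseudo_label)
    ref = normalizer(ground_truth)
    if not ref:  # discard examples whose reference normalises to an empty string
        return False
    wer = 100 * wer_metric.compute(predictions=[pred], references=[ref])
    return wer <= threshold
```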
## Reproducing Distil-Whisper Training and evaluation code to reproduce Distil-Whisper is available under the Distil-Whisper repository: https://github.com/huggingface/distil-whisper/tree/main/training ## License Distil-Whisper inherits the [MIT license](https://github.com/huggingface/distil-whisper/blob/main/LICENSE) from OpenAI's Whisper model. ## Citation If you use this model, please consider citing the [Distil-Whisper paper](https://arxiv.org/abs/2311.00430): ``` @misc{gandhi2023distilwhisper, title={Distil-Whisper: Robust Knowledge Distillation via Large-Scale Pseudo Labelling}, author={Sanchit Gandhi and Patrick von Platen and Alexander M. Rush}, year={2023}, eprint={2311.00430}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## Acknowledgements * OpenAI for the Whisper [model](https://huggingface.co/openai/whisper-large-v2) and [original codebase](https://github.com/openai/whisper) * Hugging Face 🤗 [Transformers](https://github.com/huggingface/transformers) for the model integration * Google's [TPU Research Cloud (TRC)](https://sites.research.google/trc/about/) programme for Cloud TPU v4s * [`@rsonavane`](https://huggingface.co/rsonavane/distil-whisper-large-v2-8-ls) for releasing an early iteration of Distil-Whisper on the LibriSpeech dataset
SimianLuo/LCM_Dreamshaper_v7
SimianLuo
"2024-03-05T08:32:22Z"
95,479
377
diffusers
[ "diffusers", "onnx", "safetensors", "text-to-image", "en", "arxiv:2310.04378", "license:mit", "diffusers:LatentConsistencyModelPipeline", "region:us" ]
text-to-image
"2023-10-14T08:26:52Z"
--- license: mit language: - en pipeline_tag: text-to-image tags: - text-to-image --- # Latent Consistency Models Official Repository of the paper: *[Latent Consistency Models](https://arxiv.org/abs/2310.04378)*. Project Page: https://latent-consistency-models.github.io ## Try our Hugging Face demos: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model) ## Model Descriptions: Distilled from the [Dreamshaper v7](https://huggingface.co/Lykon/dreamshaper-7) fine-tune of [Stable-Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) with only 4,000 training iterations (~32 A100 GPU Hours). ## Generation Results: <p align="center"> <img src="teaser.png"> </p> By distilling classifier-free guidance into the model's input, LCM can generate high-quality images in a very short inference time. We compare the inference time at the setting of 768 x 768 resolution, CFG scale w=8, batch size 4, using an A800 GPU. <p align="center"> <img src="speed_fid.png"> </p> ## Usage You can try out Latent Consistency Models directly on: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model) To run the model yourself, you can leverage the 🧨 Diffusers library: 1. Install the library: ``` pip install --upgrade diffusers # make sure to use at least diffusers >= 0.22 pip install transformers accelerate ``` 2. Run the model: ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") # To save GPU memory, torch.float16 can be used, but it may compromise image quality. pipe.to(torch_device="cuda", torch_dtype=torch.float32) prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" # Can be set to 1~50 steps. LCM supports fast inference even at <= 4 steps. Recommended: 1~8 steps. num_inference_steps = 4 images = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0, lcm_origin_steps=50, output_type="pil").images ``` For more information, please have a look at the official docs: 👉 https://huggingface.co/docs/diffusers/api/pipelines/latent_consistency_models#latent-consistency-models ## Usage (Deprecated) 1. Install the library: ``` pip install diffusers transformers accelerate ``` 2. Run the model: ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_txt2img", custom_revision="main", revision="fb9c5d") # To save GPU memory, torch.float16 can be used, but it may compromise image quality. pipe.to(torch_device="cuda", torch_dtype=torch.float32) prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" # Can be set to 1~50 steps. LCM supports fast inference even at <= 4 steps. Recommended: 1~8 steps. num_inference_steps = 4 images = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0, output_type="pil").images ``` ## BibTeX ```bibtex @misc{luo2023latent, title={Latent Consistency Models: Synthesizing High-Resolution Images with Few-Step Inference}, author={Simian Luo and Yiqin Tan and Longbo Huang and Jian Li and Hang Zhao}, year={2023}, eprint={2310.04378}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
NbAiLab/nb-wav2vec2-300m-nynorsk
NbAiLab
"2023-10-06T12:47:40Z"
95,413
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "nn", "dataset:NbAiLab/NPSC", "arxiv:2307.01672", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2022-03-02T23:29:04Z"
--- license: apache-2.0 tags: - automatic-speech-recognition datasets: - NbAiLab/NPSC language: - nn model-index: - name: nb-wav2vec2-300m-nynorsk results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: NPSC type: NbAiLab/NPSC args: 16K_mp3_nynorsk metrics: - name: Test (Nynorsk) WER type: wer value: 0.1222 - name: Test (Nynorsk) CER type: cer value: 0.0419 --- # Norwegian Wav2Vec2 Model - 300M - VoxRex - Nynorsk This model is finetuned on top of the [VoxRex](https://huggingface.co/KBLab/wav2vec2-large-voxrex) feature extractor from the National Library of Sweden. The finetuned model achieves the following results on the test set with a 5-gram KenLM. The numbers in parentheses are the results without the language model: - **WER: 0.1222** (0.1537) - **CER: 0.0419** (0.0468) ## Model description This is one of several Wav2Vec2 models our team created during the 🤗 hosted [Robust Speech Event](https://discuss.huggingface.co/t/open-to-the-community-robust-speech-recognition-challenge/13614?s=09). This is the complete list of our models and their final scores: | Model | Final WER | |:--------------|:------------| | [NbAiLab/nb-wav2vec2-1b-bokmaal](https://huggingface.co/NbAiLab/nb-wav2vec2-1b-bokmaal) | 6.33 | | [NbAiLab/nb-wav2vec2-300m-bokmaal](https://huggingface.co/NbAiLab/nb-wav2vec2-300m-bokmaal) | 7.03 | | [NbAiLab/nb-wav2vec2-1b-nynorsk](https://huggingface.co/NbAiLab/nb-wav2vec2-1b-nynorsk) | 11.32 | | NbAiLab/nb-wav2vec2-300m-nynorsk (this model) | 12.22 | ### Dataset In parallel with the event, the team also converted the [Norwegian Parliamentary Speech Corpus (NPSC)](https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-58/) to the [NbAiLab/NPSC](https://huggingface.co/datasets/NbAiLab/NPSC) in 🤗 Dataset format and used that as the main source for training. ## Code We have released all the code developed during the event so that the Norwegian NLP community can build upon it when developing even better Norwegian ASR models. The finetuning of these models is not very computationally demanding. After following the instructions here, you should be able to train your own automatic speech recognition system in less than a day with an average GPU. ## Team The following people contributed to building this model: Rolv-Arild Braaten, Per Egil Kummervold, Andre Kåsen, Javier de la Rosa, Per Erik Solberg, and Freddy Wetjen. ## Training procedure To reproduce these results, we strongly recommend that you follow the [instructions from 🤗](https://github.com/huggingface/transformers/tree/master/examples/research_projects/robust-speech-event#talks) to train a simple Swedish model. When you have verified that you are able to do this, create a fresh new repo. You can then start by copying the files ```run.sh``` and ```run_speech_recognition_ctc.py``` from our repo. Running these will create all the other necessary files, and should let you reproduce our results. With some tweaks to the hyperparameters, you might even be able to build an even better ASR. Good luck! ### Language Model As the scores indicate, adding even a simple 5-gram language model will improve the results. 🤗 has provided another [very nice blog](https://huggingface.co/blog/wav2vec2-with-ngram) explaining how to add a 5-gram language model to improve the ASR model. You can build this from your own corpus, for instance by extracting some suitable text from the [Norwegian Colossal Corpus](https://huggingface.co/datasets/NbAiLab/NCC).
You can also skip some of the steps in the guide, and copy the [5-gram model from this repo](https://huggingface.co/NbAiLab/XLSR-300M-bokmaal/tree/main/language_model). ### Parameters The final model was run using these parameters: ``` --dataset_name="NbAiLab/NPSC" --model_name_or_path="KBLab/wav2vec2-large-voxrex" --dataset_config_name="16K_mp3_nynorsk" --output_dir="./" --overwrite_output_dir --num_train_epochs="80" --per_device_train_batch_size="16" --per_device_eval_batch_size="16" --gradient_accumulation_steps="2" --learning_rate="1e-4" --warmup_steps="2000" --length_column_name="input_length" --evaluation_strategy="steps" --text_column_name="text" --save_steps="500" --eval_steps="500" --logging_steps="100" --layerdrop="0.041" --attention_dropout="0.094" --activation_dropout="0.055" --hidden_dropout="0.047" --save_total_limit="3" --freeze_feature_encoder --feat_proj_dropout="0.04" --mask_time_prob="0.082" --mask_time_length="10" --mask_feature_prob="0.25" --mask_feature_length="64" --gradient_checkpointing --min_duration_in_seconds="0.5" --max_duration_in_seconds="30.0" --use_auth_token --seed="42" --fp16 --group_by_length --do_train --do_eval --push_to_hub --preprocessing_num_workers="32" ``` Using these settings, the training might take 3-4 days on an average GPU. You can, however, get a decent model and faster results by tweaking these parameters. | Parameter| Comment | |:-------------|:-----| | per_device_train_batch_size | Adjust this to the maximum of available memory. 16 or 24 might be good settings depending on your system | |gradient_accumulation_steps |Can be adjusted even further up to increase batch size and speed up training without running into memory issues | | learning_rate|Can be increased, maybe as high as 1e-4. Speeds up training but might add instability | | epochs| Can be decreased significantly. This is a huge dataset and you might get a decent result already after a couple of epochs| ## Citation ```bibtex @inproceedings{de-la-rosa-etal-2023-boosting, title = "Boosting {N}orwegian Automatic Speech Recognition", author = "De La Rosa, Javier and Braaten, Rolv-Arild and Kummervold, Per and Wetjen, Freddy", booktitle = "Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa)", month = may, year = "2023", address = "T{\'o}rshavn, Faroe Islands", publisher = "University of Tartu Library", url = "https://aclanthology.org/2023.nodalida-1.55", pages = "555--564", abstract = "In this paper, we present several baselines for automatic speech recognition (ASR) models for the two official written languages in Norway: Bokm{\aa}l and Nynorsk. We compare the performance of models of varying sizes and pre-training approaches on multiple Norwegian speech datasets. Additionally, we measure the performance of these models against previous state-of-the-art ASR models, as well as on out-of-domain datasets. We improve the state of the art on the Norwegian Parliamentary Speech Corpus (NPSC) from a word error rate (WER) of 17.10{\%} to 7.60{\%}, with models achieving 5.81{\%} for Bokm{\aa}l and 11.54{\%} for Nynorsk. We also discuss the challenges and potential solutions for further improving ASR models for Norwegian.", } ``` See https://arxiv.org/abs/2307.01672
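For quick testing, a minimal transcription sketch using the 🤗 `pipeline` API (the audio path is a placeholder for a local 16 kHz recording; if the repository ships language-model decoder files and `pyctcdecode`/`kenlm` are installed, the 5-gram language model is applied automatically, otherwise greedy CTC decoding is used):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="NbAiLab/nb-wav2vec2-300m-nynorsk")
result = asr("audio.wav")  # placeholder path to a 16 kHz mono recording
print(result["text"])
```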
Ericwang/tiny-random-ast
Ericwang
"2022-12-17T04:34:07Z"
94,903
1
transformers
[ "transformers", "pytorch", "audio-spectrogram-transformer", "feature-extraction", "endpoints_compatible", "region:us" ]
feature-extraction
"2022-12-17T01:36:12Z"
A custom AST (Audio Spectrogram Transformer) model for testing HF repos.
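A minimal feature-extraction sketch, assuming the checkpoint loads with the standard AST classes from 🤗 Transformers:

```python
import torch
from transformers import ASTFeatureExtractor, ASTModel

model_id = "Ericwang/tiny-random-ast"
feature_extractor = ASTFeatureExtractor.from_pretrained(model_id)
model = ASTModel.from_pretrained(model_id)

waveform = [0.0] * 16000  # one second of silence at 16 kHz as a dummy input
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, sequence, hidden) features
```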
tianweiy/DMD2
tianweiy
"2024-06-11T18:14:59Z"
94,399
78
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "diffusion distillation", "arxiv:2405.14867", "license:cc-by-nc-4.0", "region:us" ]
text-to-image
"2024-05-23T00:19:26Z"
--- license: cc-by-nc-4.0 library_name: diffusers tags: - text-to-image - stable-diffusion - diffusion distillation --- # DMD2 Model Card ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/63363b864067f020756275b7/YhssMfS_1e6q5fHKh9qrc.jpeg) > [**Improved Distribution Matching Distillation for Fast Image Synthesis**](https://arxiv.org/abs/2405.14867), > Tianwei Yin, Michaël Gharbi, Taesung Park, Richard Zhang, Eli Shechtman, Frédo Durand, William T. Freeman ## Contact Feel free to contact us if you have any questions about the paper! Tianwei Yin [tianweiy@mit.edu](mailto:tianweiy@mit.edu) ## Usage We can use the standard diffusers pipeline: #### 4-step UNet generation ```python import torch from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler from huggingface_hub import hf_hub_download base_model_id = "stabilityai/stable-diffusion-xl-base-1.0" repo_name = "tianweiy/DMD2" ckpt_name = "dmd2_sdxl_4step_unet_fp16.bin" # Load model. unet = UNet2DConditionModel.from_config(base_model_id, subfolder="unet").to("cuda", torch.float16) unet.load_state_dict(torch.load(hf_hub_download(repo_name, ckpt_name), map_location="cuda")) pipe = DiffusionPipeline.from_pretrained(base_model_id, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) prompt = "a photo of a cat" # LCMScheduler's default timesteps are different from the one we used for training image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0, timesteps=[999, 749, 499, 249]).images[0] ``` #### 4-step LoRA generation ```python import torch from diffusers import DiffusionPipeline, LCMScheduler from huggingface_hub import hf_hub_download base_model_id = "stabilityai/stable-diffusion-xl-base-1.0" repo_name = "tianweiy/DMD2" ckpt_name = "dmd2_sdxl_4step_lora_fp16.safetensors" # Load model. pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to("cuda") pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name)) pipe.fuse_lora(lora_scale=1.0) # we might want to make the scale smaller for community models pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) prompt = "a photo of a cat" # LCMScheduler's default timesteps are different from the one we used for training image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0, timesteps=[999, 749, 499, 249]).images[0] ``` #### 1-step UNet generation ```python import torch from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler from huggingface_hub import hf_hub_download base_model_id = "stabilityai/stable-diffusion-xl-base-1.0" repo_name = "tianweiy/DMD2" ckpt_name = "dmd2_sdxl_1step_unet_fp16.bin" # Load model.
unet = UNet2DConditionModel.from_config(base_model_id, subfolder="unet").to("cuda", torch.float16) unet.load_state_dict(torch.load(hf_hub_download(repo_name, ckpt_name), map_location="cuda")) pipe = DiffusionPipeline.from_pretrained(base_model_id, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) prompt = "a photo of a cat" image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0, timesteps=[399]).images[0] ``` #### 4-step T2I Adapter ```python from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, AutoencoderKL, UNet2DConditionModel, LCMScheduler from diffusers.utils import load_image from controlnet_aux.canny import CannyDetector from huggingface_hub import hf_hub_download import torch # load adapter adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16").to("cuda") vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) base_model_id = "stabilityai/stable-diffusion-xl-base-1.0" repo_name = "tianweiy/DMD2" ckpt_name = "dmd2_sdxl_4step_unet_fp16.bin" # Load model. unet = UNet2DConditionModel.from_config(base_model_id, subfolder="unet").to("cuda", torch.float16) unet.load_state_dict(torch.load(hf_hub_download(repo_name, ckpt_name), map_location="cuda")) pipe = StableDiffusionXLAdapterPipeline.from_pretrained( base_model_id, unet=unet, vae=vae, adapter=adapter, torch_dtype=torch.float16, variant="fp16", ).to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe.enable_xformers_memory_efficient_attention() canny_detector = CannyDetector() url = "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg" image = load_image(url) # Detect the canny map in low resolution to avoid high-frequency details image = canny_detector(image, detect_resolution=384, image_resolution=1024)#.resize((1024, 1024)) prompt = "Mystical fairy in real, magic, 4k picture, high quality" gen_images = pipe( prompt=prompt, image=image, num_inference_steps=4, guidance_scale=0, adapter_conditioning_scale=0.8, adapter_conditioning_factor=0.5, timesteps=[999, 749, 499, 249] ).images[0] gen_images.save('out_canny.png') ``` For more information, please refer to the [code repository](https://github.com/tianweiy/DMD2) ## License Improved Distribution Matching Distillation is released under [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/deed.en). ## Citation If you find DMD2 useful or relevant to your research, please kindly cite our papers: ```bib @article{yin2024improved, title={Improved Distribution Matching Distillation for Fast Image Synthesis}, author={Yin, Tianwei and Gharbi, Micha{\"e}l and Park, Taesung and Zhang, Richard and Shechtman, Eli and Durand, Fredo and Freeman, William T}, journal={arXiv:2405.14867}, year={2024} } @inproceedings{yin2024onestep, title={One-step Diffusion with Distribution Matching Distillation}, author={Yin, Tianwei and Gharbi, Micha{\"e}l and Zhang, Richard and Shechtman, Eli and Durand, Fr{\'e}do and Freeman, William T and Park, Taesung}, booktitle={CVPR}, year={2024} } ``` ## Acknowledgments This work was done while Tianwei Yin was a full-time student at MIT. It was developed based on our reimplementation of the original DMD paper.
This work was supported by the National Science Foundation under Cooperative Agreement PHY-2019786 (The NSF AI Institute for Artificial Intelligence and Fundamental Interactions, http://iaifi.org/), by NSF Grant 2105819, by NSF CISE award 1955864, and by funding from Google, GIST, Amazon, and Quanta Computer.
sentinet/suicidality
sentinet
"2024-01-07T08:40:48Z"
93,844
20
transformers
[ "transformers", "pytorch", "safetensors", "electra", "text-classification", "classification", "suicidality", "suicidal text detection", "suicidal sentiment", "sentiment", "suicide", "self harm", "depression", "en", "license:cc0-1.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2023-08-31T19:17:44Z"
--- license: cc0-1.0 language: - en metrics: - accuracy: 0.939432 - recall: 0.937164 - precision: 0.92822 - f1: 0.932672 tags: - classification - suicidality - suicidal text detection - suicidal sentiment - sentiment - suicide - self harm - depression pipeline_tag: text-classification --- # Advanced Suicidality Classifier Model ## Introduction Welcome to the Suicidality Detection AI Model! This project aims to provide a machine learning solution for detecting sequences of words indicative of suicidality in text. By utilizing the ELECTRA architecture and fine-tuning on a diverse dataset, we have created a powerful classification model that can distinguish between suicidal and non-suicidal text expressions. ## Labels The model classifies input text into two labels: - `LABEL_0`: Indicates that the text is non-suicidal. - `LABEL_1`: Indicates that the text is indicative of suicidality. ## Training The model was fine-tuned using the ELECTRA architecture on a carefully curated dataset. Our training process involved cleaning and preprocessing various text sources to create a comprehensive training set. The training results indicate promising performance, as detailed below. ## Performance The model's performance on the validation dataset is as follows: - Accuracy: 0.939432 - Recall: 0.937164 - Precision: 0.92822 - F1 Score: 0.932672 These metrics demonstrate the model's ability to accurately classify sequences of text as either indicative of suicidality or non-suicidal. ## Data Sources We collected data from multiple sources to create a rich and diverse training dataset: - https://www.kaggle.com/datasets/thedevastator/c-ssrs-labeled-suicidality-in-500-anonymized-red - https://www.kaggle.com/datasets/amangoyl/reddit-dataset-for-multi-task-nlp - https://www.kaggle.com/datasets/imeshsonu/suicideal-phrases - https://raw.githubusercontent.com/laxmimerit/twitter-suicidal-intention-dataset/master/twitter-suicidal_data.csv - https://www.kaggle.com/datasets/mohanedmashaly/suicide-notes - https://www.kaggle.com/datasets/natalialech/suicidal-ideation-on-twitter The data underwent thorough cleaning and preprocessing before being used for training the model. ## How to Use ### Installation To use the model, you need to install the Transformers library: ```bash pip install transformers ``` ### Using the Model You can utilize the model for text classification using the following code snippets: 1. Using the pipeline approach: ```python from transformers import pipeline classifier = pipeline("sentiment-analysis", model="sentinetyd/suicidality") result = classifier("text to classify") print(result) ``` 2. Using the tokenizer and model programmatically: ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("sentinetyd/suicidality") model = AutoModelForSequenceClassification.from_pretrained("sentinetyd/suicidality") # Perform tokenization and prediction: 0 = non-suicidal, 1 = suicidal inputs = tokenizer("text to classify", return_tensors="pt") prediction = model(**inputs).logits.argmax(dim=-1).item() print(prediction) ``` ## Ethical Considerations Suicidality is a sensitive and serious topic. It's important to exercise caution and consider ethical implications when using this model. Predictions made by the model should be handled with care and used to complement human judgment and intervention. ## Model Credits We would like to acknowledge the "gooohjy/suicidal-electra" model available on Hugging Face's model repository. You can find the model at [this link](https://huggingface.co/gooohjy/suicidal-electra). We used this model as a starting point and fine-tuned it to create our specialized suicidality detection model.
## Contributions We welcome contributions and feedback from the community to further improve the model's performance, enhance the dataset, and ensure its responsible deployment.
Qdrant/all-MiniLM-L6-v2-onnx
Qdrant
"2024-07-15T15:10:38Z"
93,838
3
transformers
[ "transformers", "onnx", "bert", "feature-extraction", "sentence-similarity", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-01-16T08:09:23Z"
--- license: apache-2.0 pipeline_tag: sentence-similarity --- ONNX port of [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) for text classification and similarity searches. ### Usage Here's an example of performing inference using the model with [FastEmbed](https://github.com/qdrant/fastembed). > Note: This model is intended to be used with Qdrant and [FastEmbed](https://github.com/qdrant/fastembed). ```py from fastembed import TextEmbedding documents = [ "You should stay, study and sprint.", "History can only prepare us to be surprised yet again.", ] model = TextEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2") embeddings = list(model.embed(documents)) # [ # array([ # 0.00611658, 0.00068912, -0.0203846, ..., -0.01751488, -0.01174267, # 0.01463472 # ], # dtype=float32), # array([ # 0.00173448, -0.00329958, 0.01557874, ..., -0.01473586, 0.0281806, # -0.00448205 # ], # dtype=float32) # ] ```
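To index and search these embeddings with Qdrant itself, here is a short sketch assuming the `qdrant-client` package with its FastEmbed integration installed (the collection name and query text are illustrative):

```py
from qdrant_client import QdrantClient

client = QdrantClient(":memory:")  # in-memory instance for a quick local test
client.set_model("sentence-transformers/all-MiniLM-L6-v2")  # embed with this model
client.add(collection_name="demo", documents=documents)  # reuses `documents` from above
hits = client.query(collection_name="demo", query_text="Why does history surprise us?")
print(hits[0].document, hits[0].score)
```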
llava-hf/LLaVA-NeXT-Video-7B-hf
llava-hf
"2024-10-08T06:11:38Z"
93,628
42
transformers
[ "transformers", "safetensors", "llava_next_video", "pretraining", "video-text-to-text", "en", "dataset:lmms-lab/VideoChatGPT", "arxiv:2405.21075", "license:llama2", "endpoints_compatible", "region:us" ]
video-text-to-text
"2024-06-05T13:28:32Z"
--- language: - en license: llama2 pipeline_tag: video-text-to-text datasets: - lmms-lab/VideoChatGPT --- # LLaVA-NeXT-Video Model Card Check out also the Google Colab demo to run Llava on a free-tier Google Colab instance: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1CZggLHrjxMReG-FNOmqSOdi4z7NPq6SO?usp=sharing) Disclaimer: The team releasing LLaVa-NeXT-Video did not write a model card for this model so this model card has been written by the Hugging Face team. ## 📄 Model details **Model type:** LLaVA-Next-Video is an open-source chatbot trained by fine-tuning an LLM on multimodal instruction-following data. The model is built on top of LLaVa-NeXT by tuning on a mix of video and image data to achieve better video understanding capabilities. The videos were sampled uniformly to be 32 frames per clip. The model is a current SOTA among open-source models on [VideoMME bench](https://arxiv.org/abs/2405.21075). Base LLM: [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) ![llava_next_video_arch](demo.png) **Model date:** LLaVA-Next-Video-7B was trained in April 2024. **Paper or resources for more information:** https://github.com/LLaVA-VL/LLaVA-NeXT ## 📚 Training dataset ### Image - 558K filtered image-text pairs from LAION/CC/SBU, captioned by BLIP. - 158K GPT-generated multimodal instruction-following data. - 500K academic-task-oriented VQA data mixture. - 50K GPT-4V data mixture. - 40K ShareGPT data. ### Video - 100K VideoChatGPT-Instruct. ## 📊 Evaluation dataset A collection of 4 benchmarks, including 3 academic VQA benchmarks and 1 captioning benchmark. ## 🚀 How to use the model First, make sure to have `transformers >= 4.42.0`. The model supports multi-visual and multi-prompt generation, meaning that you can pass multiple images/videos in your prompt. Make sure also to follow the correct prompt template (`USER: xxx\nASSISTANT:`) and add the token `<image>` or `<video>` to the location where you want to query images/videos: Below is an example script to run generation in `float16` precision on a GPU device: ```python import av import torch import numpy as np from huggingface_hub import hf_hub_download from transformers import LlavaNextVideoProcessor, LlavaNextVideoForConditionalGeneration model_id = "llava-hf/LLaVA-NeXT-Video-7B-hf" model = LlavaNextVideoForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, ).to(0) processor = LlavaNextVideoProcessor.from_pretrained(model_id) def read_video_pyav(container, indices): ''' Decode the video with PyAV decoder. Args: container (`av.container.input.InputContainer`): PyAV container. indices (`List[int]`): List of frame indices to decode. Returns: result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ''' frames = [] container.seek(0) start_index = indices[0] end_index = indices[-1] for i, frame in enumerate(container.decode(video=0)): if i > end_index: break if i >= start_index and i in indices: frames.append(frame) return np.stack([x.to_ndarray(format="rgb24") for x in frames]) # define a chat history and use `apply_chat_template` to get correctly formatted prompt # Each value in "content" has to be a list of dicts with types ("text", "image", "video") conversation = [ { "role": "user", "content": [ {"type": "text", "text": "Why is this video funny?"}, {"type": "video"}, ], }, ] prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset") container = av.open(video_path) # sample uniformly 8 frames from the video, can sample more for longer videos total_frames = container.streams.video[0].frames indices = np.arange(0, total_frames, total_frames / 8).astype(int) clip = read_video_pyav(container, indices) inputs_video = processor(text=prompt, videos=clip, padding=True, return_tensors="pt").to(model.device) output = model.generate(**inputs_video, max_new_tokens=100, do_sample=False) print(processor.decode(output[0][2:], skip_special_tokens=True)) ``` ### Inference with images as inputs To generate from images use the below code after loading the model as shown above: ```python import requests from PIL import Image conversation = [ { "role": "user", "content": [ {"type": "text", "text": "What are these?"}, {"type": "image"}, ], }, ] prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) image_file = "http://images.cocodataset.org/val2017/000000039769.jpg" raw_image = Image.open(requests.get(image_file, stream=True).raw) inputs_image = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16) output = model.generate(**inputs_image, max_new_tokens=100, do_sample=False) print(processor.decode(output[0][2:], skip_special_tokens=True)) ``` ### Inference with images and videos as inputs To generate from images and videos in one call, use the below code after loading the model as shown above: ```python conversation_1 = [ { "role": "user", "content": [ {"type": "text", "text": "What's the content of the image?"}, {"type": "image"}, ], } ] conversation_2 = [ { "role": "user", "content": [ {"type": "text", "text": "Why is this video funny?"}, {"type": "video"}, ], }, ] prompt_1 = processor.apply_chat_template(conversation_1, add_generation_prompt=True) prompt_2 = processor.apply_chat_template(conversation_2, add_generation_prompt=True) inputs = processor(text=[prompt_1, prompt_2], images=raw_image, videos=clip, padding=True, return_tensors="pt").to(model.device) # Generate generate_ids = model.generate(**inputs, max_new_tokens=100) out = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) print(out) ``` ### Model optimization #### 4-bit quantization through `bitsandbytes` library First make sure to install `bitsandbytes`, `pip install bitsandbytes` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with: ```diff model = LlavaNextVideoForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, + load_in_4bit=True ) ``` #### Use Flash-Attention 2 to further speed-up generation First make sure to install `flash-attn`.
Refer to the [original repository of Flash Attention](https://github.com/Dao-AILab/flash-attention) regarding that package installation. Simply change the snippet above with: ```diff model = LlavaNextVideoForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, + use_flash_attention_2=True ).to(0) ``` ## 🔒 License Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved. ## ✏️ Citation If you find our paper and code useful in your research: ```BibTeX @misc{zhang2024llavanextvideo, title={LLaVA-NeXT: A Strong Zero-shot Video Understanding Model}, url={https://llava-vl.github.io/blog/2024-04-30-llava-next-video/}, author={Zhang, Yuanhan and Li, Bo and Liu, haotian and Lee, Yong jae and Gui, Liangke and Fu, Di and Feng, Jiashi and Liu, Ziwei and Li, Chunyuan}, month={April}, year={2024} } ``` ```BibTeX @misc{liu2024llavanext, title={LLaVA-NeXT: Improved reasoning, OCR, and world knowledge}, url={https://llava-vl.github.io/blog/2024-01-30-llava-next/}, author={Liu, Haotian and Li, Chunyuan and Li, Yuheng and Li, Bo and Zhang, Yuanhan and Shen, Sheng and Lee, Yong Jae}, month={January}, year={2024} } ```
MarcoMancini/low-law-emb
MarcoMancini
"2023-09-28T09:55:02Z"
93,580
0
transformers
[ "transformers", "pytorch", "endpoints_compatible", "region:us" ]
null
"2023-08-28T08:30:52Z"
timbrooks/instruct-pix2pix
timbrooks
"2023-07-05T16:19:25Z"
93,547
960
diffusers
[ "diffusers", "safetensors", "image-to-image", "license:mit", "diffusers:StableDiffusionInstructPix2PixPipeline", "region:us" ]
image-to-image
"2023-01-20T04:27:06Z"
--- license: mit tags: - image-to-image --- # InstructPix2Pix: Learning to Follow Image Editing Instructions GitHub: https://github.com/timothybrooks/instruct-pix2pix <img src='https://instruct-pix2pix.timothybrooks.com/teaser.jpg'/> ## Example To use `InstructPix2Pix`, install `diffusers` using `main` for now. The pipeline will be available in the next release ```bash pip install diffusers accelerate safetensors transformers ``` ```python import PIL import requests import torch from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler model_id = "timbrooks/instruct-pix2pix" pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None) pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) url = "https://raw.githubusercontent.com/timothybrooks/instruct-pix2pix/main/imgs/example.jpg" def download_image(url): image = PIL.Image.open(requests.get(url, stream=True).raw) image = PIL.ImageOps.exif_transpose(image) image = image.convert("RGB") return image image = download_image(url) prompt = "turn him into cyborg" images = pipe(prompt, image=image, num_inference_steps=10, image_guidance_scale=1).images images[0] ```
TheBloke/CodeLlama-7B-Instruct-GPTQ
TheBloke
"2023-09-27T12:46:05Z"
93,522
45
transformers
[ "transformers", "safetensors", "llama", "text-generation", "llama-2", "custom_code", "code", "arxiv:2308.12950", "license:llama2", "autotrain_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
text-generation
"2023-08-24T20:27:24Z"
--- language: - code license: llama2 tags: - llama-2 model_name: CodeLlama 7B Instruct base_model: codellama/CodeLlama-7b-instruct-hf inference: false model_creator: Meta model_type: llama pipeline_tag: text-generation prompt_template: '[INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```: {prompt} [/INST] ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # CodeLlama 7B Instruct - GPTQ - Model creator: [Meta](https://huggingface.co/meta-llama) - Original model: [CodeLlama 7B Instruct](https://huggingface.co/codellama/CodeLlama-7b-instruct-hf) <!-- description start --> ## Description This repo contains GPTQ model files for [Meta's CodeLlama 7B Instruct](https://huggingface.co/codellama/CodeLlama-7b-instruct-hf). Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF) * [Meta's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/codellama/CodeLlama-7b-instruct-hf) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: CodeLlama ``` [INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```: {prompt} [/INST] ``` <!-- prompt-template end --> <!-- README_GPTQ.md-provided-files start --> ## Provided files and GPTQ parameters Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. All recent GPTQ files are made with AutoGPTQ, and all files in non-main branches are made with AutoGPTQ. Files in the `main` branch which were uploaded before August 2023 were made with GPTQ-for-LLaMa. 
<details> <summary>Explanation of GPTQ parameters</summary> - Bits: The bit size of the quantised model. - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. "None" is the lowest possible value. - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now. - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy. - GPTQ dataset: The dataset used for quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s). - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences. - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama models in 4-bit. </details> | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc | | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- | | [main](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ/tree/main) | 4 | 128 | No | 0.1 | [Evol Instruct Code](https://huggingface.co/datasets/nickrosh/Evol-Instruct-Code-80k-v1) | 8192 | 3.90 GB | Yes | 4-bit, without Act Order and group size 128g. | | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [Evol Instruct Code](https://huggingface.co/datasets/nickrosh/Evol-Instruct-Code-80k-v1) | 8192 | 4.28 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.1 | [Evol Instruct Code](https://huggingface.co/datasets/nickrosh/Evol-Instruct-Code-80k-v1) | 8192 | 4.02 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. | | [gptq-4bit-128g-actorder_True](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ/tree/gptq-4bit-128g-actorder_True) | 4 | 128 | Yes | 0.1 | [Evol Instruct Code](https://huggingface.co/datasets/nickrosh/Evol-Instruct-Code-80k-v1) | 8192 | 3.90 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [Evol Instruct Code](https://huggingface.co/datasets/nickrosh/Evol-Instruct-Code-80k-v1) | 8192 | 7.01 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. 
| | [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [Evol Instruct Code](https://huggingface.co/datasets/nickrosh/Evol-Instruct-Code-80k-v1) | 8192 | 7.16 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. | <!-- README_GPTQ.md-provided-files end --> <!-- README_GPTQ.md-download-from-branches start --> ## How to download from branches - In text-generation-webui, you can add `:branch` to the end of the download name, eg `TheBloke/CodeLlama-7B-Instruct-GPTQ:main` - With Git, you can clone a branch with: ``` git clone --single-branch --branch main https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GPTQ ``` - In Python Transformers code, the branch is the `revision` parameter; see below. <!-- README_GPTQ.md-download-from-branches end --> <!-- README_GPTQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui). Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/CodeLlama-7B-Instruct-GPTQ`. - To download from a specific branch, enter for example `TheBloke/CodeLlama-7B-Instruct-GPTQ:main` - see Provided Files above for the list of branches for each option. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `CodeLlama-7B-Instruct-GPTQ` 7. The model will automatically load, and is now ready for use! 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. * Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`. 9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started! <!-- README_GPTQ.md-text-generation-webui end --> <!-- README_GPTQ.md-use-from-python start --> ## How to use this GPTQ model from Python code ### Install the necessary packages Requires: Transformers 4.32.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later. ```shell pip3 install transformers>=4.32.0 optimum>=1.12.0 pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7 ``` If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead: ```shell pip3 uninstall -y auto-gptq git clone https://github.com/PanQiWei/AutoGPTQ cd AutoGPTQ pip3 install . ``` ### For CodeLlama models only: you must use Transformers 4.33.0 or later. 
If 4.33.0 is not yet released when you read this, you will need to install Transformers from source:
```shell
pip3 uninstall -y transformers
pip3 install git+https://github.com/huggingface/transformers.git
```

### You can then use the following code

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name_or_path = "TheBloke/CodeLlama-7B-Instruct-GPTQ"
# To use a different branch, change revision
# For example: revision="main"
model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
                                             device_map="auto",
                                             trust_remote_code=True,
                                             revision="main")

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

prompt = "Tell me about AI"
prompt_template=f'''[INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```:
{prompt}
[/INST]
'''

print("\n\n*** Generate:")

input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
print(tokenizer.decode(output[0]))

# Inference can also be done using transformers' pipeline
print("*** Pipeline:")
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    repetition_penalty=1.1
)

print(pipe(prompt_template)[0]['generated_text'])
```
<!-- README_GPTQ.md-use-from-python end -->

<!-- README_GPTQ.md-compatibility start -->
## Compatibility

The files provided are tested to work with AutoGPTQ, both via Transformers and using AutoGPTQ directly. They should also work with [Occ4m's GPTQ-for-LLaMa fork](https://github.com/0cc4m/KoboldAI).

[ExLlama](https://github.com/turboderp/exllama) is compatible with Llama models in 4-bit. Please see the Provided Files table above for per-file compatibility.

[Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is compatible with all GPTQ models.
<!-- README_GPTQ.md-compatibility end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.
**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> # Original model card: Meta's CodeLlama 7B Instruct # **Code Llama** Code Llama is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 34 billion parameters. This is the repository for the 7B instruct-tuned version in the Hugging Face Transformers format. This model is designed for general code synthesis and understanding. Links to other models can be found in the index at the bottom. 
|     | Base Model                                                                      | Python                                                                                        | Instruct                                                                                          |
| --- | ------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
| 7B  | [codellama/CodeLlama-7b-hf](https://huggingface.co/codellama/CodeLlama-7b-hf)   | [codellama/CodeLlama-7b-Python-hf](https://huggingface.co/codellama/CodeLlama-7b-Python-hf)   | [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf)   |
| 13B | [codellama/CodeLlama-13b-hf](https://huggingface.co/codellama/CodeLlama-13b-hf) | [codellama/CodeLlama-13b-Python-hf](https://huggingface.co/codellama/CodeLlama-13b-Python-hf) | [codellama/CodeLlama-13b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) |
| 34B | [codellama/CodeLlama-34b-hf](https://huggingface.co/codellama/CodeLlama-34b-hf) | [codellama/CodeLlama-34b-Python-hf](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) | [codellama/CodeLlama-34b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf) |

## Model Use

To use this model, please make sure to install transformers from `main` until the next version is released:

```bash
pip install git+https://github.com/huggingface/transformers.git@main accelerate
```

Model capabilities:

- [x] Code completion.
- [x] Infilling.
- [x] Instructions / chat.
- [ ] Python specialist.

## Model Details
*Note: Use of this model is governed by the Meta license.*

Meta developed and publicly released the Code Llama family of large language models (LLMs).

**Model Developers** Meta

**Variations** Code Llama comes in three model sizes, and three variants:

* Code Llama: base models designed for general code synthesis and understanding
* Code Llama - Python: designed specifically for Python
* Code Llama - Instruct: for instruction following and safer deployment

All variants are available in sizes of 7B, 13B and 34B parameters.

**This repository contains the Instruct version of the 7B parameter model.**

**Input** Models input text only.

**Output** Models generate text only.

**Model Architecture** Code Llama is an auto-regressive language model that uses an optimized transformer architecture.

**Model Dates** Code Llama and its variants have been trained between January 2023 and July 2023.

**Status** This is a static model trained on an offline dataset. Future versions of Code Llama - Instruct will be released as we improve model safety with community feedback.

**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)

**Research Paper** More information can be found in the paper "[Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)" or its [arXiv page](https://arxiv.org/abs/2308.12950).

## Intended Use
**Intended Use Cases** Code Llama and its variants are intended for commercial and research use in English and relevant programming languages. The base model Code Llama can be adapted for a variety of code synthesis and understanding tasks, Code Llama - Python is designed specifically to handle the Python programming language, and Code Llama - Instruct is intended to be safer to use for code assistant and generation applications.
**Out-of-Scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Code Llama and its variants.

## Hardware and Software
**Training Factors** We used custom training libraries. The training and fine-tuning of the released models have been performed on Meta’s Research Super Cluster.

**Carbon Footprint** In aggregate, training all 9 Code Llama models required 400K GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 65.3 tCO2eq, 100% of which were offset by Meta’s sustainability program.

## Training Data

All experiments reported here and the released models have been trained and fine-tuned using the same data as Llama 2, with different weights (see Section 2 and Table 1 in the [research paper](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) for details).

## Evaluation Results

See evaluations for the main models and detailed ablations in Section 3 and safety evaluations in Section 4 of the research paper.

## Ethical Considerations and Limitations

Code Llama and its variants are a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Code Llama’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate or objectionable responses to user prompts. Therefore, before deploying any applications of Code Llama, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-user-guide](https://ai.meta.com/llama/responsible-user-guide).
microsoft/speecht5_hifigan
microsoft
"2023-02-02T13:08:06Z"
93,404
16
transformers
[ "transformers", "pytorch", "hifigan", "audio", "license:mit", "endpoints_compatible", "region:us" ]
null
"2023-02-02T13:06:10Z"
--- license: mit tags: - audio --- # SpeechT5 HiFi-GAN Vocoder This is the HiFi-GAN vocoder for use with the SpeechT5 text-to-speech and voice conversion models. SpeechT5 was first released in [this repository](https://github.com/microsoft/SpeechT5/), [original weights](https://huggingface.co/mechanicalsea/speecht5-tts). The license used is [MIT](https://github.com/microsoft/SpeechT5/blob/main/LICENSE). Disclaimer: The team releasing SpeechT5 did not write a model card for this model so this model card has been written by the Hugging Face team. ## Citation **BibTeX:** ```bibtex @inproceedings{ao-etal-2022-speecht5, title = {{S}peech{T}5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing}, author = {Ao, Junyi and Wang, Rui and Zhou, Long and Wang, Chengyi and Ren, Shuo and Wu, Yu and Liu, Shujie and Ko, Tom and Li, Qing and Zhang, Yu and Wei, Zhihua and Qian, Yao and Li, Jinyu and Wei, Furu}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = {May}, year = {2022}, pages={5723--5738}, } ```
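## Usage (sketch)

A minimal usage sketch (not part of the original card), assuming the 🤗 Transformers SpeechT5 API; the zero-vector speaker embedding below is a placeholder, and real usage would load a 512-dimensional x-vector (e.g., from the CMU ARCTIC x-vectors dataset):

```python
import torch
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")

# Placeholder speaker embedding (512-dim); substitute a real x-vector for natural-sounding speech.
speaker_embeddings = torch.zeros((1, 512))

# The HiFi-GAN vocoder converts the model's mel spectrogram into a waveform.
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
print(speech.shape)  # 1-D waveform tensor at 16 kHz
```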
CofeAI/FLM-2-52B-Instruct-2407
CofeAI
"2024-07-29T05:05:34Z"
93,364
11
transformers
[ "transformers", "pytorch", "TeleFLM", "text-generation", "conversational", "custom_code", "arxiv:2311.18743", "arxiv:2407.02783", "arxiv:2404.16645", "autotrain_compatible", "region:us" ]
text-generation
"2024-07-22T06:44:06Z"
# Introduction

FLM-2 (aka Tele-FLM) is our open-source large language model series. The FLM-2 series demonstrates superior performance at its scale and sometimes surpasses larger models. The currently released versions include [Tele-FLM](https://huggingface.co/CofeAI/Tele-FLM) and [Tele-FLM-1T](https://huggingface.co/CofeAI/Tele-FLM-1T). These models feature a stable, efficient pre-training paradigm and enhanced factual judgment capabilities. This repo contains the instruction-tuned 52B Tele-FLM model, which we have named FLM-2-52B-Instruct.

# Model Details

FLM-2-52B-Instruct utilizes the standard GPT-style decoder-only transformer architecture with a few adjustments:
* Rotary Positional Embedding (RoPE)
* RMSNorm for normalization
* SwiGLU for activation function
* Linear bias disabled
* Embedding and language model head untied
* Input and output multiplier

| Models | layer<br>number | attention<br>heads | hidden<br>size | ffn hidden<br>size | vocab<br>size | params<br>count |
| ------------- | :-------------: | :----------------: | :------------: | :----------------: | :-----------: | :--------------: |
| FLM-2-52B-Instruct-2407 | 64 | 64 | 8,192 | 21,824 | 80,000 | 52.85 B |

# Training details

Unlike conventional fine-tuning methods, we employed an innovative and cost-effective fine-tuning approach. Through specialized screening techniques, we meticulously selected 30,735 samples from a large corpus of fine-tuning data. This refined dataset facilitated the fine-tuning process and yielded promising results.

# Quickstart

The following code shows how to load the tokenizer and the model, and how to generate content.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('CofeAI/FLM-2-52B-Instruct-2407', trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained('CofeAI/FLM-2-52B-Instruct-2407', torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, device_map="auto", trust_remote_code=True)

history = [
    {"role": "user", "content": "你好"},
    {"role": "assistant", "content": "你好"},
    {"role": "user", "content": "北京有哪些必去的景点?"}
]
inputs = tokenizer.apply_chat_template(history, return_tensors='pt').to(model.device)
response = model.generate(inputs, max_new_tokens=128, repetition_penalty=1.03)
print(tokenizer.decode(response.cpu()[0], skip_special_tokens=True))
```

# Evaluation

We evaluate the alignment performance of FLM-2-52B-Instruct-2407 in Chinese across various domains utilizing [AlignBench](https://arxiv.org/pdf/2311.18743). AlignBench is a comprehensive and multidimensional evaluation benchmark designed to assess Chinese large language models’ alignment performance. It encompasses 8 categories with a total of 683 question-answer pairs, covering areas such as fundamental language ability (Fund.), Chinese advanced understanding (Chi.), open-ended questions (Open.), writing ability (Writ.), logical reasoning (Logi.), mathematics (Math.), task-oriented role playing (Role.), and professional knowledge (Pro.).

| Models | Overall | Math. | Logi. | Fund. | Chi. | Open. | Writ. | Role. | Pro. |
| ----------------------- | :-------: | :-----: | :-----: | :-----: | :----: | :-----: | :-----: | :-----: | :----: |
| gpt-4-1106-preview | **7.58** | **7.39** | **6.83** | **7.69** |<u>7.07</u>| **8.66** | **8.23** | **8.08** | **8.55** |
| gpt-4-0613 | <u>6.83</u> |<u>6.33</u>|<u>5.15</u>| 7.16 | 6.76 | 7.26 | 7.31 | 7.48 | 7.56 |
| gpt-3.5-turbo-0613 | 5.68 | 4.90 | 4.79 | 6.01 | 5.60 | 6.97 | 7.27 | 6.98 | 6.29 |
| chatglm-turbo | 6.36 | 4.88 | 5.09 |<u>7.50</u>| 7.03 |<u>8.45</u>| 8.05 | 7.67 | 7.70 |
| FLM-2-52B-Instruct-2407 | 6.23 | 3.79 |<u>5.15</u>| **7.69** | **7.86** |<u>8.45</u>|<u>8.17</u>|<u>7.88</u>|<u>7.85</u>|

# Citation

If you find our work helpful, please consider citing it.

```
@article{tele-flm-1t,
  author = {Xiang Li and Yiqun Yao and Xin Jiang and Xuezhi Fang and Chao Wang and Xinzhang Liu and Zihan Wang and Yu Zhao and Xin Wang and Yuyao Huang and Shuangyong Song and Yongxiang Li and Zheng Zhang and Bo Zhao and Aixin Sun and Yequan Wang and Zhongjiang He and Zhongyuan Wang and Xuelong Li and Tiejun Huang},
  title = {52B to 1T: Lessons Learned via Tele-FLM Series},
  journal = {CoRR},
  volume = {abs/2407.02783},
  year = {2024},
  url = {https://doi.org/10.48550/arXiv.2407.02783},
  doi = {10.48550/ARXIV.2407.02783},
  eprinttype = {arXiv},
  eprint = {2407.02783},
}

@article{tele-flm-2024,
  author = {Xiang Li and Yiqun Yao and Xin Jiang and Xuezhi Fang and Chao Wang and Xinzhang Liu and Zihan Wang and Yu Zhao and Xin Wang and Yuyao Huang and Shuangyong Song and Yongxiang Li and Zheng Zhang and Bo Zhao and Aixin Sun and Yequan Wang and Zhongjiang He and Zhongyuan Wang and Xuelong Li and Tiejun Huang},
  title = {Tele-FLM Technical Report},
  journal = {CoRR},
  volume = {abs/2404.16645},
  year = {2024},
  url = {https://doi.org/10.48550/arXiv.2404.16645},
  doi = {10.48550/ARXIV.2404.16645},
  eprinttype = {arXiv},
  eprint = {2404.16645},
}
```
tohoku-nlp/bert-base-japanese-char-v3
tohoku-nlp
"2023-05-19T00:39:44Z"
93,281
5
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "pretraining", "ja", "dataset:cc100", "dataset:wikipedia", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2023-05-19T00:33:09Z"
--- license: apache-2.0 datasets: - cc100 - wikipedia language: - ja widget: - text: 東北大学で[MASK]の研究をしています。 --- # BERT base Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102) This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language. This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by character-level tokenization. Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective. The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/). ## Model architecture The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads. ## Training Data The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia. For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023. The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively. For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7). ## Tokenization The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into characters. The vocabulary size is 7027. We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization. ## Training We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps. For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once. For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/). ## Licenses The pretrained models are distributed under the Apache License 2.0. ## Acknowledgments This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program.
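## Usage (sketch)

A minimal masked-language-modeling sketch (not part of the original card), assuming the standard 🤗 Transformers API; the `fugashi` and `unidic-lite` packages are required for the MeCab-based tokenizer:

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# requires: pip install fugashi unidic-lite
model_name = "tohoku-nlp/bert-base-japanese-char-v3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)

text = "東北大学で[MASK]の研究をしています。"
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Top-5 character predictions for the masked position
mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top5 = logits[0, mask_positions[0]].topk(5).indices.tolist()
print(tokenizer.convert_ids_to_tokens(top5))
```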
medicalai/ClinicalBERT
medicalai
"2023-09-15T08:46:54Z"
93,248
178
transformers
[ "transformers", "pytorch", "distilbert", "fill-mask", "medical", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2023-03-19T15:04:41Z"
---
tags:
- medical
---

# ClinicalBERT

<!-- Provide a quick summary of what the model is/does. -->

This model card describes the ClinicalBERT model, which was trained on a large multicenter corpus of 1.2B words covering diverse diseases that we constructed. We then used a large-scale corpus of EHRs from over 3 million patient records to fine-tune the base language model.

## Pretraining Data

The ClinicalBERT model was trained on a large multicenter dataset with a corpus of 1.2B words covering diverse diseases that we constructed.
<!-- For more details, see here. -->

## Model Pretraining

### Pretraining Procedures

ClinicalBERT was initialized from BERT. Training then followed the masked language modeling objective: given a piece of text, some tokens are randomly replaced with the special [MASK] token, and the model is trained to predict the original tokens from the surrounding context.

### Pretraining Hyperparameters

We used a batch size of 32, a maximum sequence length of 256, and a learning rate of 5e-5 for pre-training our models.

## How to use the model

Load the model via the transformers library:
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("medicalai/ClinicalBERT")
model = AutoModel.from_pretrained("medicalai/ClinicalBERT")
```

## Citation

Please cite this article: Wang, G., Liu, X., Ying, Z. et al. Optimized glycemic control of type 2 diabetes with reinforcement learning: a proof-of-concept trial. Nat Med (2023). https://doi.org/10.1038/s41591-023-02552-9
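## Usage sketch: extracting embeddings

As an illustrative extension of the loading snippet above (a sketch, not part of the original card), contextual embeddings for clinical text can be obtained with a standard forward pass:

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("medicalai/ClinicalBERT")
model = AutoModel.from_pretrained("medicalai/ClinicalBERT")

# Example clinical sentence; any EHR-style text works here.
inputs = tokenizer("The patient was discharged on metformin.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Token-level contextual embeddings: (batch, seq_len, hidden_size)
token_embeddings = outputs.last_hidden_state
print(token_embeddings.shape)
```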
InstantX/InstantID
InstantX
"2024-01-22T09:43:05Z"
93,144
722
diffusers
[ "diffusers", "safetensors", "text-to-image", "en", "arxiv:2401.07519", "license:apache-2.0", "region:us" ]
text-to-image
"2024-01-19T11:52:05Z"
---
license: apache-2.0
language:
- en
library_name: diffusers
pipeline_tag: text-to-image
---

# InstantID Model Card

<div align="center">

[**Project Page**](https://instantid.github.io/) **|** [**Paper**](https://arxiv.org/abs/2401.07519) **|** [**Code**](https://github.com/InstantID/InstantID) **|** [🤗 **Gradio demo**](https://huggingface.co/spaces/InstantX/InstantID)

</div>

## Introduction

InstantID is a new state-of-the-art tuning-free method to achieve ID-preserving generation with only a single image, supporting various downstream tasks.

<div align="center">
<img src='examples/applications.png'>
</div>

## Usage

You can download the model directly from this repository, or download it via a Python script:

```python
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/config.json", local_dir="./checkpoints")
hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/diffusion_pytorch_model.safetensors", local_dir="./checkpoints")
hf_hub_download(repo_id="InstantX/InstantID", filename="ip-adapter.bin", local_dir="./checkpoints")
```

For the face encoder, you need to manually download it via this [URL](https://github.com/deepinsight/insightface/issues/1896#issuecomment-1023867304) to `models/antelopev2`.

```python
# !pip install opencv-python transformers accelerate insightface
import diffusers
from diffusers.utils import load_image
from diffusers.models import ControlNetModel

import cv2
import torch
import numpy as np
from PIL import Image

from insightface.app import FaceAnalysis
from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps

# prepare 'antelopev2' under ./models
app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))

# prepare models under ./checkpoints
face_adapter = f'./checkpoints/ip-adapter.bin'
controlnet_path = f'./checkpoints/ControlNetModel'

# load IdentityNet
controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)

pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.cuda()

# load adapter
pipe.load_ip_adapter_instantid(face_adapter)
```

Then, you can customize generation using your own face image:

```python
# load an image
face_image = load_image("your-example.jpg")

# prepare face embedding
face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[-1]  # only use the largest face
face_emb = face_info['embedding']
face_kps = draw_kps(face_image, face_info['kps'])

pipe.set_ip_adapter_scale(0.8)

prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality"
negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured"

# generate image
image = pipe(
    prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8
).images[0]
```

For more details, please follow the instructions in our [GitHub repository](https://github.com/InstantID/InstantID).

## Usage Tips

1. If you're not satisfied with the similarity, try to increase the weight of "IdentityNet Strength" and "Adapter Strength".
2. If you feel that the saturation is too high, first decrease the Adapter strength. If it is still too high, then decrease the IdentityNet strength.
3. If you find that text control is not as expected, decrease Adapter strength.
4. If you find that the realistic style is not good enough, go to our GitHub repo and use a more realistic base model.

## Demos

<div align="center">
<img src='examples/0.png'>
</div>

<div align="center">
<img src='examples/1.png'>
</div>

## Disclaimer

This project is released under the Apache License and aims to positively impact the field of AI-driven image generation. Users are granted the freedom to create images using this tool, but they are obligated to comply with local laws and utilize it responsibly. The developers will not assume any responsibility for potential misuse by users.

## Citation

```bibtex
@article{wang2024instantid,
  title={InstantID: Zero-shot Identity-Preserving Generation in Seconds},
  author={Wang, Qixun and Bai, Xu and Wang, Haofan and Qin, Zekui and Chen, Anthony},
  journal={arXiv preprint arXiv:2401.07519},
  year={2024}
}
```
citizenlab/twitter-xlm-roberta-base-sentiment-finetunned
citizenlab
"2022-12-02T13:49:38Z"
92,999
32
transformers
[ "transformers", "pytorch", "xlm-roberta", "text-classification", "en", "nl", "fr", "pt", "it", "es", "de", "da", "pl", "af", "dataset:jigsaw_toxicity_pred", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
---
pipeline_type: "text-classification"
widget:
- text: "this is a lovely message"
  example_title: "Example 1"
  multi_class: false
- text: "you are an idiot and you and your family should go back to your country"
  example_title: "Example 2"
  multi_class: false
language:
- en
- nl
- fr
- pt
- it
- es
- de
- da
- pl
- af
datasets:
- jigsaw_toxicity_pred
metrics:
- F1 Accuracy
---

# citizenlab/twitter-xlm-roberta-base-sentiment-finetunned

This is a multilingual XLM-RoBERTa sequence classifier, fine-tuned from the [Cardiff NLP Group](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment) sentiment classification model.

## How to use it

```python
from transformers import pipeline

model_path = "citizenlab/twitter-xlm-roberta-base-sentiment-finetunned"
sentiment_classifier = pipeline("text-classification", model=model_path, tokenizer=model_path)

sentiment_classifier("this is a lovely message")
> [{'label': 'Positive', 'score': 0.9918450713157654}]

sentiment_classifier("you are an idiot and you and your family should go back to your country")
> [{'label': 'Negative', 'score': 0.9849833846092224}]
```

## Evaluation

```
              precision    recall  f1-score   support

    Negative       0.57      0.14      0.23        28
     Neutral       0.78      0.94      0.86       132
    Positive       0.89      0.80      0.85        51

    accuracy                           0.80       211
   macro avg       0.75      0.63      0.64       211
weighted avg       0.78      0.80      0.77       211
```
tohoku-nlp/bert-base-japanese-char-v2
tohoku-nlp
"2021-09-23T13:45:24Z"
92,999
6
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "fill-mask", "ja", "dataset:wikipedia", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
--- language: ja license: cc-by-sa-4.0 datasets: - wikipedia widget: - text: 東北大学で[MASK]の研究をしています。 --- # BERT base Japanese (character-level tokenization with whole word masking, jawiki-20200831) This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language. This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by character-level tokenization. Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective. The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/tree/v2.0). ## Model architecture The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads. ## Training Data The models are trained on the Japanese version of Wikipedia. The training corpus is generated from the Wikipedia Cirrussearch dump file as of August 31, 2020. The generated corpus files are 4.0GB in total, containing approximately 30M sentences. We used the [MeCab](https://taku910.github.io/mecab/) morphological parser with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary to split texts into sentences. ## Tokenization The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into characters. The vocabulary size is 6144. We used [`fugashi`](https://github.com/polm/fugashi) and [`unidic-lite`](https://github.com/polm/unidic-lite) packages for the tokenization. ## Training The models are trained with the same configuration as the original BERT; 512 tokens per instance, 256 instances per batch, and 1M training steps. For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once. For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TensorFlow Research Cloud program](https://www.tensorflow.org/tfrc/). The training took about 5 days to finish. ## Licenses The pretrained models are distributed under the terms of the [Creative Commons Attribution-ShareAlike 3.0](https://creativecommons.org/licenses/by-sa/3.0/). ## Acknowledgments This model is trained with Cloud TPUs provided by [TensorFlow Research Cloud](https://www.tensorflow.org/tfrc/) program.
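## Usage (sketch)

A minimal fill-mask sketch (not part of the original card), assuming the standard 🤗 Transformers pipeline API; `fugashi` and `unidic-lite` are required for the MeCab-based tokenizer:

```python
from transformers import pipeline

# requires: pip install fugashi unidic-lite
fill_mask = pipeline("fill-mask", model="tohoku-nlp/bert-base-japanese-char-v2")
print(fill_mask("東北大学で[MASK]の研究をしています。"))
```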
pritamdeka/S-PubMedBert-MS-MARCO
pritamdeka
"2024-09-05T09:39:02Z"
92,476
26
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "transformers", "license:cc-by-nc-2.0", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
license: cc-by-nc-2.0
---

# pritamdeka/S-PubMedBert-MS-MARCO

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

This is the [microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) model, fine-tuned over the MS-MARCO dataset using the sentence-transformers framework. It can be used for information retrieval tasks in the medical/health text domain.

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('pritamdeka/S-PubMedBert-MS-MARCO')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('pritamdeka/S-PubMedBert-MS-MARCO')
model = AutoModel.from_pretrained('pritamdeka/S-PubMedBert-MS-MARCO')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

<!--- ## Evaluation Results -->

<!--- Describe how your model was evaluated -->

<!--- For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) -->

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 31434 with parameters:
```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`beir.losses.margin_mse_loss.MarginMSELoss`

Parameters of the fit()-Method:
```
{
    "callback": null,
    "epochs": 2,
    "evaluation_steps": 10000,
    "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "correct_bias": false,
        "eps": 1e-06,
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 1000,
    "weight_decay": 0.01
}
```

## Full Model Architecture
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->

```
@article{deka2022improved,
  title={Improved Methods To Aid Unsupervised Evidence-Based Fact Checking For Online Health News},
  author={Deka, Pritam and Jurek-Loughrey, Anna and Deepak, P},
  journal={Journal of Data Intelligence},
  volume={3},
  number={4},
  pages={474--504},
  year={2022}
}
```
Qwen/Qwen2-7B-Instruct-AWQ
Qwen
"2024-08-21T10:30:36Z"
92,234
19
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "chat", "conversational", "en", "arxiv:2309.00071", "base_model:Qwen/Qwen2-7B-Instruct", "base_model:quantized:Qwen/Qwen2-7B-Instruct", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "awq", "region:us" ]
text-generation
"2024-06-06T06:18:35Z"
---
license: apache-2.0
language:
- en
pipeline_tag: text-generation
tags:
- chat
base_model: Qwen/Qwen2-7B-Instruct
---

# Qwen2-7B-Instruct-AWQ

## Introduction

Qwen2 is the new series of Qwen large language models. For Qwen2, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters, including a Mixture-of-Experts model. This repo contains the instruction-tuned 7B Qwen2 model.

Compared with state-of-the-art open-source language models, including the previously released Qwen1.5, Qwen2 has generally surpassed most open-source models and demonstrated competitiveness against proprietary models across a series of benchmarks targeting language understanding, language generation, multilingual capability, coding, mathematics, reasoning, etc.

Qwen2-7B-Instruct-AWQ supports a context length of up to 131,072 tokens, enabling the processing of extensive inputs. Please refer to [this section](#processing-long-texts) for detailed instructions on how to deploy Qwen2 for handling long texts.

For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2/), [GitHub](https://github.com/QwenLM/Qwen2), and [Documentation](https://qwen.readthedocs.io/en/latest/).
<br>

## Model Details

Qwen2 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes.

## Training details

We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization.

## Requirements

The code for Qwen2 has been merged into the latest Hugging Face Transformers, and we advise you to install `transformers>=4.37.0`, or you might encounter the following error:
```
KeyError: 'qwen2'
```

## Quickstart

The following code snippet uses `apply_chat_template` to show how to load the tokenizer and model and how to generate content.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
device = "cuda"  # the device to load the model onto

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B-Instruct-AWQ",
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct-AWQ")

prompt = "Give me a short introduction to large language model."
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(device)

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=512
)
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```

### Processing Long Texts

To handle extensive inputs exceeding 32,768 tokens, we utilize [YARN](https://arxiv.org/abs/2309.00071), a technique for enhancing model length extrapolation, ensuring optimal performance on lengthy texts.

For deployment, we recommend using vLLM. You can enable the long-context capabilities by following these steps:

1. **Install vLLM**: You can install vLLM by running the following command.

```bash
pip install "vllm>=0.4.3"
```

Or you can install vLLM from [source](https://github.com/vllm-project/vllm/).

2. **Configure Model Settings**: After downloading the model weights, modify the `config.json` file by including the snippet below:
```json
{
    "architectures": [
        "Qwen2ForCausalLM"
    ],
    // ...
    "vocab_size": 152064,

    // adding the following snippets
    "rope_scaling": {
        "factor": 4.0,
        "original_max_position_embeddings": 32768,
        "type": "yarn"
    }
}
```
This snippet enables YARN to support longer contexts.

3. **Model Deployment**: Utilize vLLM to deploy your model. For instance, you can set up an OpenAI-like server using the command:

```bash
python -m vllm.entrypoints.openai.api_server --served-model-name Qwen2-7B-Instruct-AWQ --model path/to/weights
```

Then you can access the Chat API by:

```bash
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
    "model": "Qwen2-7B-Instruct-AWQ",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Your Long Input Here."}
    ]
    }'
```

For further usage instructions of vLLM, please refer to our [GitHub](https://github.com/QwenLM/Qwen2).

**Note**: Presently, vLLM only supports static YARN, which means the scaling factor remains constant regardless of input length, **potentially impacting performance on shorter texts**. We advise adding the `rope_scaling` configuration only when processing long contexts is required.

## Benchmark and Speed

To compare the generation performance between bfloat16 (bf16) and quantized models such as GPTQ-Int8, GPTQ-Int4, and AWQ, please consult our [Benchmark of Quantized Models](https://qwen.readthedocs.io/en/latest/benchmark/quantization_benchmark.html). This benchmark provides insights into how different quantization techniques affect model performance.

For those interested in understanding the inference speed and memory consumption when deploying these models with either ``transformers`` or ``vLLM``, we have compiled an extensive [Speed Benchmark](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html).

## Citation

If you find our work helpful, feel free to give us a cite.

```
@article{qwen2,
  title={Qwen2 Technical Report},
  year={2024}
}
```
Dongjin-kr/ko-reranker
Dongjin-kr
"2024-05-08T03:50:35Z"
92,214
47
transformers
[ "transformers", "pytorch", "safetensors", "xlm-roberta", "text-classification", "ko", "en", "arxiv:2307.03172", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2023-12-22T07:50:36Z"
---
license: mit
language:
- ko
- en
pipeline_tag: text-classification
---

# Korean Reranker Training on Amazon SageMaker

### A fine-tuning guide for developing a **Korean reranker**.

ko-reranker is a model fine-tuned on Korean data, based on [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large). <br>
For more details, see [korean-reranker-git](https://github.com/aws-samples/aws-ai-ml-workshop-kr/tree/master/genai/aws-gen-ai-kr/30_fine_tune/reranker-kr) and the [AWS blog post, "Improving retrieval-augmented generation (RAG) performance with a Korean reranker"](https://aws.amazon.com/ko/blogs/tech/korean-reranker-rag/).

- - -

## 0. Features

- #### Unlike an embedding model, a reranker takes a question and a document as input and directly outputs a similarity score rather than an embedding.
- #### Given a question and a passage, the reranker returns a relevance score.
- #### The reranker is optimized with a cross-entropy loss, so the relevance scores are not bounded to a specific range.

## 1. Usage

- using Transformers

```
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def exp_normalize(x):
    b = x.max()
    y = np.exp(x - b)
    return y / y.sum()

model_path = "Dongjin-kr/ko-reranker"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)
model.eval()

pairs = [["나는 너를 싫어해", "나는 너를 사랑해"], \
         ["나는 너를 좋아해", "너에 대한 나의 감정은 사랑 일 수도 있어"]]

with torch.no_grad():
    inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
    scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
    scores = exp_normalize(scores.numpy())
    print (f'first: {scores[0]}, second: {scores[1]}')
```

- using SageMaker

```
import json
import boto3
import sagemaker
from sagemaker.huggingface import HuggingFaceModel

try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID':'Dongjin-kr/ko-reranker',
    'HF_TASK':'text-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.28.1',
    pytorch_version='2.0.0',
    py_version='py310',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.g5.large'  # ec2 instance type
)

runtime_client = boto3.Session().client('sagemaker-runtime')
payload = json.dumps(
    {
        "inputs": [
            {"text": "나는 너를 싫어해", "text_pair": "나는 너를 사랑해"},
            {"text": "나는 너를 좋아해", "text_pair": "너에 대한 나의 감정은 사랑 일 수도 있어"}
        ]
    }
)

response = runtime_client.invoke_endpoint(
    EndpointName="<endpoint-name>",
    ContentType="application/json",
    Accept="application/json",
    Body=payload
)

## deserialization
out = json.loads(response['Body'].read().decode())  ## for json
print (f'Response: {out}')
```

## 2. Background

- #### **The order of contexts affects accuracy** ([Lost in the Middle, *Liu et al., 2023*](https://arxiv.org/pdf/2307.03172.pdf))
- #### [Why you should use a reranker](https://www.pinecone.io/learn/series/rag/rerankers/)
    - Simply stuffing more context into an LLM does not help; the relevant passages must be ranked near the top for the model to answer well.
    - The similarity (relevance) scores used in semantic search are not precise. (That is, is a higher-ranked passage really always more relevant to the question than a lower-ranked one?)
        * Embeddings specialize in capturing the meaning behind a document.
        * A question and its answer are not semantically identical. ([Hypothetical Document Embeddings](https://medium.com/prompt-engineering/hyde-revolutionising-search-with-hypothetical-document-embeddings-3474df795af8))
        * There is a penalty from using ANNs ([Approximate Nearest Neighbors](https://towardsdatascience.com/comprehensive-guide-to-approximate-nearest-neighbors-algorithms-8b94f057d6b6)).

- - -

## 3. Reranker models

- #### [Cohere] [Reranker](https://txt.cohere.com/rerank/)
- #### [BAAI] [bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large)
- #### [BAAI] [bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base)

- - -

## 4. Dataset

- #### [msmarco-triplets](https://github.com/microsoft/MSMARCO-Passage-Ranking)
    - (Question, Answer, Negative)-Triplets from the MS MARCO Passages dataset, 499,184 samples
    - The dataset is in English.
    - It was translated into Korean with Amazon Translate and used for training.
- #### Format
    ```
    {"query": str, "pos": List[str], "neg": List[str]}
    ```
    - `query` is the question, `pos` is a list of positive texts, and `neg` is a list of negative texts. If a query has no negative texts, some can be randomly sampled from the whole corpus and used as negatives.
- #### Example
    ```
    {"query": "대한민국의 수도는?", "pos": ["미국의 수도는 워싱턴이고, 일본은 도쿄이며 한국은 서울이다."], "neg": ["미국의 수도는 워싱턴이고, 일본은 도쿄이며 북한은 평양이다."]}
    ```

- - -

## 5. Performance

| Model | has-right-in-contexts | mrr (mean reciprocal rank) |
|:---------------------------|:-----------------:|:--------------------------:|
| without-reranker (default) | 0.93 | 0.80 |
| with-reranker (bge-reranker-large) | 0.95 | 0.84 |
| **with-reranker (fine-tuned using Korean)** | **0.96** | **0.87** |

- **evaluation set**:
```code
./dataset/evaluation/eval_dataset.csv
```

- **training parameters**:

```json
{
    "learning_rate": 5e-6,
    "fp16": True,
    "num_train_epochs": 3,
    "per_device_train_batch_size": 1,
    "gradient_accumulation_steps": 32,
    "train_group_size": 3,
    "max_len": 512,
    "weight_decay": 0.01
}
```

- - -

## 6. Acknowledgement

- Part of the code is developed based on [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/tree/master?tab=readme-ov-file) and [KoSimCSE-SageMaker](https://github.com/daekeun-ml/KoSimCSE-SageMaker/tree/7de6eefef8f1a646c664d0888319d17480a3ebe5).

- - -

## 7. Citation

- If you find this repository useful, please consider giving a like ⭐ and citation.

- - -

## 8. Contributors:

- **Dongjin Jang, Ph.D.** (AWS AI/ML Specialist Solutions Architect) | [Mail](mailto:dongjinj@amazon.com) | [Linkedin](https://www.linkedin.com/in/dongjin-jang-kr/) | [Git](https://github.com/dongjin-ml) |

- - -

## 9. License

- FlagEmbedding is licensed under the [MIT License](https://github.com/aws-samples/aws-ai-ml-workshop-kr/blob/master/LICENSE).

## 10. Analytics

- [![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fhuggingface.co%2FDongjin-kr%2Fko-reranker&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://hits.seeyoufarm.com)
w11wo/indonesian-roberta-base-sentiment-classifier
w11wo
"2023-05-13T04:10:11Z"
92,210
35
transformers
[ "transformers", "pytorch", "tf", "safetensors", "roberta", "text-classification", "indonesian-roberta-base-sentiment-classifier", "id", "dataset:indonlu", "arxiv:1907.11692", "doi:10.57967/hf/2997", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- language: id tags: - indonesian-roberta-base-sentiment-classifier license: mit datasets: - indonlu widget: - text: "Jangan sampai saya telpon bos saya ya!" --- ## Indonesian RoBERTa Base Sentiment Classifier Indonesian RoBERTa Base Sentiment Classifier is a sentiment-text-classification model based on the [RoBERTa](https://arxiv.org/abs/1907.11692) model. The model was originally the pre-trained [Indonesian RoBERTa Base](https://hf.co/flax-community/indonesian-roberta-base) model, which is then fine-tuned on [`indonlu`](https://hf.co/datasets/indonlu)'s `SmSA` dataset consisting of Indonesian comments and reviews. After training, the model achieved an evaluation accuracy of 94.36% and F1-macro of 92.42%. On the benchmark test set, the model achieved an accuracy of 93.2% and F1-macro of 91.02%. Hugging Face's `Trainer` class from the [Transformers](https://huggingface.co/transformers) library was used to train the model. PyTorch was used as the backend framework during training, but the model remains compatible with other frameworks nonetheless. ## Model | Model | #params | Arch. | Training/Validation data (text) | | ---------------------------------------------- | ------- | ------------ | ------------------------------- | | `indonesian-roberta-base-sentiment-classifier` | 124M | RoBERTa Base | `SmSA` | ## Evaluation Results The model was trained for 5 epochs and the best model was loaded at the end. | Epoch | Training Loss | Validation Loss | Accuracy | F1 | Precision | Recall | | ----- | ------------- | --------------- | -------- | -------- | --------- | -------- | | 1 | 0.342600 | 0.213551 | 0.928571 | 0.898539 | 0.909803 | 0.890694 | | 2 | 0.190700 | 0.213466 | 0.934127 | 0.901135 | 0.925297 | 0.882757 | | 3 | 0.125500 | 0.219539 | 0.942857 | 0.920901 | 0.927511 | 0.915193 | | 4 | 0.083600 | 0.235232 | 0.943651 | 0.924227 | 0.926494 | 0.922048 | | 5 | 0.059200 | 0.262473 | 0.942063 | 0.920583 | 0.924084 | 0.917351 | ## How to Use ### As Text Classifier ```python from transformers import pipeline pretrained_name = "w11wo/indonesian-roberta-base-sentiment-classifier" nlp = pipeline( "sentiment-analysis", model=pretrained_name, tokenizer=pretrained_name ) nlp("Jangan sampai saya telpon bos saya ya!") ``` ## Disclaimer Do consider the biases which come from both the pre-trained RoBERTa model and the `SmSA` dataset that may be carried over into the results of this model. ## Author Indonesian RoBERTa Base Sentiment Classifier was trained and evaluated by [Wilson Wongso](https://w11wo.github.io/). All computation and development are done on Google Colaboratory using their free GPU access. ## Citation If used, please cite the following: ```bibtex @misc {wilson_wongso_2023, author = { {Wilson Wongso} }, title = { indonesian-roberta-base-sentiment-classifier (Revision e402e46) }, year = 2023, url = { https://huggingface.co/w11wo/indonesian-roberta-base-sentiment-classifier }, doi = { 10.57967/hf/0644 }, publisher = { Hugging Face } } ```
Systran/faster-distil-whisper-medium.en
Systran
"2024-01-19T03:59:58Z"
91,916
2
ctranslate2
[ "ctranslate2", "audio", "automatic-speech-recognition", "en", "license:mit", "region:us" ]
automatic-speech-recognition
"2024-01-19T03:23:01Z"
---
language:
- en
tags:
- audio
- automatic-speech-recognition
license: mit
library_name: ctranslate2
---

# Distil-Whisper medium.en model for CTranslate2

This repository contains the conversion of [distil-whisper/distil-medium.en](https://huggingface.co/distil-whisper/distil-medium.en) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format.

This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/systran/faster-whisper).

## Example

```python
from faster_whisper import WhisperModel

model = WhisperModel("distil-medium.en")

segments, info = model.transcribe("audio.mp3")
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
```

## Conversion details

The original model was converted with the following command:

```
ct2-transformers-converter --model distil-whisper/distil-medium.en --output_dir faster-distil-whisper-medium.en \
    --copy_files tokenizer.json preprocessor_config.json --quantization float16
```

Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html).

## More information

**For more information about the original model, see its [model card](https://huggingface.co/distil-whisper/distil-medium.en).**
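As a hedged illustration of the `compute_type` note above (the device and quantization type are assumptions about your setup, not part of the original card):

```python
from faster_whisper import WhisperModel

# Weights are stored in FP16, but CTranslate2 can convert them at load time;
# "int8_float16" runs int8 weights with float16 activations on a CUDA GPU.
model = WhisperModel("distil-medium.en", device="cuda", compute_type="int8_float16")

segments, info = model.transcribe("audio.mp3")
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
```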
Corcelio/mobius
Corcelio
"2024-06-01T13:43:40Z"
91,882
223
diffusers
[ "diffusers", "safetensors", "text-to-image", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-05-12T16:01:24Z"
---
pipeline_tag: text-to-image
widget:
- text: >-
    movie scene screencap, cinematic footage. thanos smelling a little yellow
    rose. extreme wide angle,
  output:
    url: images/1man.png
- text: god
  output:
    url: images/god.png
- text: 'A tiny robot taking a break under a tree in the garden '
  output:
    url: images/robot.png
- text: mystery
  output:
    url: images/mystery.png
- text: a cat wearing sunglasses in the summer
  output:
    url: images/cat.png
- text: 'robot holding a sign that says ’a storm is coming’ '
  output:
    url: images/storm.png
- text: >-
    The Exegenesis of the soul, captured within a boundless well of starlight,
    pulsating and vibrating wisps, chiaroscuro, humming transformer
  output:
    url: images/soul.png
- text: >-
    anime boy, protagonist, best quality
  output:
    url: images/animeboy.png
- text: natural photography of a man, glasses, cinematic,
  output:
    url: images/glasses.png
- text: if I could turn back time
  output:
    url: images/time.png
- text: >-
    ("Mobius" text logo) powerful aura, swirling power, cinematic
  output:
    url: images/mobius.png
- text: the backrooms
  output:
    url: images/backrooms.png
license: apache-2.0
---

<Gallery />

# Mobius: Redefining State-of-the-Art in Debiased Diffusion Models

Mobius is a diffusion model that pushes the boundaries of domain-agnostic debiasing and representation realignment. By employing a brand new constructive deconstruction framework, Mobius achieves unrivaled generalization across a vast array of styles and domains, eliminating the need for expensive pretraining from scratch.

# Domain-Agnostic Debiasing: A Groundbreaking Approach

Domain-agnostic debiasing is a novel technique pioneered by Corcel. This innovative approach aims to remove biases inherent in diffusion models without limiting their ability to generalize across diverse domains. Traditional debiasing methods often focus on specific domains or styles, resulting in models that struggle to adapt to new or unseen contexts. In contrast, domain-agnostic debiasing ensures that the model remains unbiased while maintaining its versatility and adaptability.

The key to domain-agnostic debiasing lies in the constructive deconstruction framework. This framework allows for fine-grained reworking of biases and representations without the need for pretraining from scratch. The technical details of this groundbreaking approach will be discussed in an upcoming research paper, "Constructive Deconstruction: Domain-Agnostic Debiasing of Diffusion Models," which will be made available on the Corcel.io website and through scientific publications.

By applying domain-agnostic debiasing, Mobius sets a new standard for fairness and impartiality in image generation while maintaining its exceptional ability to adapt to a wide range of styles and domains.

# Surpassing the State-of-the-Art

Mobius outperforms existing state-of-the-art diffusion models in several key areas:

Unbiased generation: Mobius generates images that are virtually free from the inherent biases commonly found in other diffusion models, setting a new benchmark for fairness and impartiality across all domains.

Exceptional generalization: With its unparalleled ability to adapt to an extensive range of styles and domains, Mobius consistently delivers top-quality results, surpassing the limitations of previous models.
Efficient fine-tuning: The Mobius base model serves as a superior foundation for creating specialized models tailored to specific tasks or domains, requiring significantly less fine-tuning and computational resources compared to other state-of-the-art models.

# Recommendations

- CFG between 3.5 and 7
  - 3.5 for extreme realism and skin detailing
  - 7 for artistic, anime, surrealism, and so on.
- Requires a CLIP skip of -3
- Sampler: DPM++ 3M SDE
- Scheduler: Karras
- Steps: 50
- Resolution: 1024x1024

Please also consider using these keywords to improve your prompts: best quality, HD, '~*~aesthetic~*~'.

# Use it with 🧨 diffusers

```python
import torch
from diffusers import (
    StableDiffusionXLPipeline,
    KDPM2AncestralDiscreteScheduler,
    AutoencoderKL
)

# Load VAE component
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16
)

# Configure the pipeline
pipe = StableDiffusionXLPipeline.from_pretrained(
    "Corcelio/mobius",
    vae=vae,
    torch_dtype=torch.float16
)
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to('cuda')

# Define prompts and generate image
prompt = "mystery"
negative_prompt = ""

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    width=1024,
    height=1024,
    guidance_scale=7,
    num_inference_steps=50,
    clip_skip=3
).images[0]
image.save("generated_image.png")
```

# Credits
Made by Corcel [ https://corcel.io/ ]
facebook/mask2former-swin-large-mapillary-vistas-semantic
facebook
"2023-09-11T20:45:32Z"
91,789
2
transformers
[ "transformers", "pytorch", "safetensors", "mask2former", "vision", "image-segmentation", "dataset:coco", "arxiv:2112.01527", "arxiv:2107.06278", "license:other", "endpoints_compatible", "region:us" ]
image-segmentation
"2023-01-05T00:46:58Z"
---
license: other
tags:
- vision
- image-segmentation
datasets:
- coco
widget:
- src: http://images.cocodataset.org/val2017/000000039769.jpg
  example_title: Cats
- src: http://images.cocodataset.org/val2017/000000039770.jpg
  example_title: Castle
---

# Mask2Former

Mask2Former model trained on Mapillary Vistas semantic segmentation (large-sized version, Swin backbone). It was introduced in the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) and first released in [this repository](https://github.com/facebookresearch/Mask2Former/).

Disclaimer: The team releasing Mask2Former did not write a model card for this model so this model card has been written by the Hugging Face team.

## Model description

Mask2Former addresses instance, semantic and panoptic segmentation with the same paradigm: by predicting a set of masks and corresponding labels. Hence, all 3 tasks are treated as if they were instance segmentation. Mask2Former outperforms the previous SOTA, [MaskFormer](https://arxiv.org/abs/2107.06278), in terms of both performance and efficiency by (i) replacing the pixel decoder with a more advanced multi-scale deformable attention Transformer, (ii) adopting a Transformer decoder with masked attention to boost performance without introducing additional computation and (iii) improving training efficiency by calculating the loss on subsampled points instead of whole masks.

![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mask2former_architecture.png)

## Intended uses & limitations

You can use this particular checkpoint for semantic segmentation. See the [model hub](https://huggingface.co/models?search=mask2former) to look for other fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

# load Mask2Former fine-tuned on Mapillary Vistas semantic segmentation
processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-mapillary-vistas-semantic")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-mapillary-vistas-semantic")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# model predicts class_queries_logits of shape `(batch_size, num_queries)`
# and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits

# you can pass them to processor for postprocessing
predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]

# we refer to the demo notebooks for visualization (see "Resources" section in the Mask2Former docs)
```

For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/mask2former).
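The comment above points to demo notebooks for visualization; as a minimal hedged sketch continuing from the snippet above, the semantic map returned by the processor (a 2-D tensor of class ids) can be rendered directly with matplotlib:

```python
import matplotlib.pyplot as plt

# `predicted_semantic_map` comes from the snippet above: a (height, width)
# tensor where each entry is a predicted class id.
plt.imshow(predicted_semantic_map.cpu().numpy())
plt.axis("off")
plt.show()
```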
syafiqfaray/indobert-model-ner
syafiqfaray
"2024-03-15T05:34:48Z"
91,398
1
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "bert", "token-classification", "generated_from_trainer", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
"2023-09-25T11:08:49Z"
--- license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: indobert-model-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # indobert-model-ner This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2296 - Precision: 0.8307 - Recall: 0.8454 - F1: 0.8380 - Accuracy: 0.9530 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.4855 | 1.0 | 784 | 0.1729 | 0.8069 | 0.8389 | 0.8226 | 0.9499 | | 0.1513 | 2.0 | 1568 | 0.1781 | 0.8086 | 0.8371 | 0.8226 | 0.9497 | | 0.1106 | 3.0 | 2352 | 0.1798 | 0.8231 | 0.8475 | 0.8351 | 0.9531 | | 0.0784 | 4.0 | 3136 | 0.1941 | 0.8270 | 0.8442 | 0.8355 | 0.9535 | | 0.0636 | 5.0 | 3920 | 0.2085 | 0.8269 | 0.8514 | 0.8389 | 0.9548 | | 0.0451 | 6.0 | 4704 | 0.2296 | 0.8307 | 0.8454 | 0.8380 | 0.9530 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
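Since the card's usage sections are empty, here is a minimal hedged sketch of running the model through the `transformers` token-classification pipeline. The example sentence is illustrative, and the exact label set depends on the (undocumented) training dataset:

```python
from transformers import pipeline

# Aggregate word-piece predictions into whole entity spans.
ner = pipeline(
    "token-classification",
    model="syafiqfaray/indobert-model-ner",
    aggregation_strategy="simple",
)

# Illustrative Indonesian sentence; entities and labels depend on the training data.
print(ner("Joko Widodo lahir di Surakarta, Jawa Tengah."))
```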
Qwen/Qwen2.5-0.5B
Qwen
"2024-09-25T12:32:36Z"
91,021
65
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-09-15T12:15:39Z"
---
license: apache-2.0
license_link: https://huggingface.co/Qwen/Qwen2.5-0.5B/blob/main/LICENSE
language:
- en
pipeline_tag: text-generation
library_name: transformers
---

# Qwen2.5-0.5B

## Introduction

Qwen2.5 is the latest series of Qwen large language models. For Qwen2.5, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters. Qwen2.5 brings the following improvements upon Qwen2:

- Significantly **more knowledge** and greatly improved capabilities in **coding** and **mathematics**, thanks to our specialized expert models in these domains.
- Significant improvements in **instruction following**, **generating long texts** (over 8K tokens), **understanding structured data** (e.g., tables), and **generating structured outputs**, especially JSON. **More resilient to the diversity of system prompts**, enhancing role-play implementation and condition-setting for chatbots.
- **Long-context support** up to 128K tokens, with the ability to generate up to 8K tokens.
- **Multilingual support** for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.

**This repo contains the base 0.5B Qwen2.5 model**, which has the following features:
- Type: Causal Language Models
- Training Stage: Pretraining
- Architecture: transformers with RoPE, SwiGLU, RMSNorm, Attention QKV bias and tied word embeddings
- Number of Parameters: 0.49B
- Number of Parameters (Non-Embedding): 0.36B
- Number of Layers: 24
- Number of Attention Heads (GQA): 14 for Q and 2 for KV
- Context Length: Full 32,768 tokens

**We do not recommend using base language models for conversations.** Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.

For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2.5/), [GitHub](https://github.com/QwenLM/Qwen2.5), and [Documentation](https://qwen.readthedocs.io/en/latest/).

## Requirements

The code for Qwen2.5 is included in the latest Hugging Face `transformers`, and we advise you to use the latest version of `transformers`.

With `transformers<4.37.0`, you will encounter the following error:
```
KeyError: 'qwen2'
```

## Evaluation & Performance

Detailed evaluation results are reported in this [📑 blog](https://qwenlm.github.io/blog/qwen2.5/).

For requirements on GPU memory and the respective throughput, see results [here](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html).

## Citation

If you find our work helpful, feel free to give us a cite.
``` @misc{qwen2.5, title = {Qwen2.5: A Party of Foundation Models}, url = {https://qwenlm.github.io/blog/qwen2.5/}, author = {Qwen Team}, month = {September}, year = {2024} } @article{qwen2, title={Qwen2 Technical Report}, author={An Yang and Baosong Yang and Binyuan Hui and Bo Zheng and Bowen Yu and Chang Zhou and Chengpeng Li and Chengyuan Li and Dayiheng Liu and Fei Huang and Guanting Dong and Haoran Wei and Huan Lin and Jialong Tang and Jialin Wang and Jian Yang and Jianhong Tu and Jianwei Zhang and Jianxin Ma and Jin Xu and Jingren Zhou and Jinze Bai and Jinzheng He and Junyang Lin and Kai Dang and Keming Lu and Keqin Chen and Kexin Yang and Mei Li and Mingfeng Xue and Na Ni and Pei Zhang and Peng Wang and Ru Peng and Rui Men and Ruize Gao and Runji Lin and Shijie Wang and Shuai Bai and Sinan Tan and Tianhang Zhu and Tianhao Li and Tianyu Liu and Wenbin Ge and Xiaodong Deng and Xiaohuan Zhou and Xingzhang Ren and Xinyu Zhang and Xipin Wei and Xuancheng Ren and Yang Fan and Yang Yao and Yichang Zhang and Yu Wan and Yunfei Chu and Yuqiong Liu and Zeyu Cui and Zhenru Zhang and Zhihao Fan}, journal={arXiv preprint arXiv:2407.10671}, year={2024} } ```
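The card above does not include a quickstart snippet; a minimal hedged sketch of base-model text completion (the prompt and generation settings are illustrative, not an official recommendation) could look like:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2.5-0.5B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto")

# Base model: plain text completion, not chat (see the note above about
# not using base models for conversations).
inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```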
gaianet/Qwen1.5-0.5B-Chat-GGUF
gaianet
"2024-05-24T08:23:01Z"
90,631
0
null
[ "gguf", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
"2024-05-24T08:22:13Z"
--- license: apache-2.0 ---
openart-custom/DucHaiten-AIart-SDXL_v3
openart-custom
"2024-09-30T16:59:14Z"
90,599
0
diffusers
[ "diffusers", "safetensors", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-09-30T16:56:59Z"
--- library_name: diffusers --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers pipeline that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
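The card's "How to Get Started" section is empty; since the repository tags indicate a `StableDiffusionXLPipeline`, a minimal hedged sketch (the prompt, dtype, and device are illustrative assumptions, not documented settings) might be:

```python
import torch
from diffusers import StableDiffusionXLPipeline

# Load the SDXL pipeline indicated by the repo tags (fp16 on a CUDA GPU is an assumption).
pipe = StableDiffusionXLPipeline.from_pretrained(
    "openart-custom/DucHaiten-AIart-SDXL_v3", torch_dtype=torch.float16
)
pipe.to("cuda")

image = pipe("a cozy cabin in a snowy forest, digital art").images[0]
image.save("out.png")
```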
sentence-transformers/paraphrase-MiniLM-L12-v2
sentence-transformers
"2024-10-11T11:24:39Z"
90,309
6
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "onnx", "safetensors", "openvino", "bert", "feature-extraction", "sentence-similarity", "transformers", "arxiv:1908.10084", "license:apache-2.0", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
---
license: apache-2.0
library_name: sentence-transformers
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
pipeline_tag: sentence-similarity
---

# sentence-transformers/paraphrase-MiniLM-L12-v2

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L12-v2')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/paraphrase-MiniLM-L12-v2')
model = AutoModel.from_pretrained('sentence-transformers/paraphrase-MiniLM-L12-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/paraphrase-MiniLM-L12-v2)

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

This model was trained by [sentence-transformers](https://www.sbert.net/).
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
fabiochiu/t5-base-tag-generation
fabiochiu
"2023-08-03T07:55:12Z"
90,140
52
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
"2022-05-19T08:45:13Z"
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: t5-base-tag-generation
  results: []
widget:
- text: "Python is a high-level, interpreted, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically-typed and garbage-collected."
  example_title: "Programming"
---

# Model description

This model is [t5-base](https://huggingface.co/t5-base) fine-tuned on the [190k Medium Articles](https://www.kaggle.com/datasets/fabiochiusano/medium-articles) dataset for predicting article tags using the article textual content as input. While tag prediction is usually formulated as a multi-label classification problem, this model deals with _tag generation_ as a text2text generation task (inspiration from [text2tags](https://huggingface.co/efederici/text2tags)).

# How to use the model

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import nltk
nltk.download('punkt')

tokenizer = AutoTokenizer.from_pretrained("fabiochiu/t5-base-tag-generation")
model = AutoModelForSeq2SeqLM.from_pretrained("fabiochiu/t5-base-tag-generation")

text = """
Python is a high-level, interpreted, general-purpose programming language. Its
design philosophy emphasizes code readability with the use of significant
indentation. Python is dynamically-typed and garbage-collected.
"""

inputs = tokenizer([text], max_length=512, truncation=True, return_tensors="pt")
output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=10,
                        max_length=64)
decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
tags = list(set(decoded_output.strip().split(", ")))

print(tags)
# ['Programming', 'Code', 'Software Development', 'Programming Languages',
#  'Software', 'Developer', 'Python', 'Software Engineering', 'Science',
#  'Engineering', 'Technology', 'Computer Science', 'Coding', 'Digital', 'Tech',
#  'Python Programming']
```

## Data cleaning

The dataset is composed of Medium articles and their tags. However, each Medium article can have at most five tags, so the author needs to choose what he/she believes are the best tags (mainly for SEO-related purposes). This means that an article with the "Python" tag may not have the "Programming Languages" tag, even though the former implies the latter.

To clean the dataset accounting for this problem, a hand-made taxonomy of about 1000 tags was built. Using the taxonomy, the tags of each article have been augmented (e.g. an article with the "Python" tag will have the "Programming Languages" tag as well, as the taxonomy says that "Python" is part of "Programming Languages"). The taxonomy is not public; if you are interested in it, please send an email to chiusanofabio94@gmail.com.

## Training and evaluation data

The model has been trained on a single epoch spanning about 50000 articles, evaluating on 1000 random articles not used during training.

## Evaluation results

- eval_loss: 0.8474
- eval_rouge1: 38.6033
- eval_rouge2: 20.5952
- eval_rougeL: 36.4458
- eval_rougeLsum: 36.3202
- eval_gen_len: 15.257 # average number of generated tokens

## Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 4e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
microsoft/speecht5_tts
microsoft
"2023-11-08T14:37:23Z"
90,073
643
transformers
[ "transformers", "pytorch", "speecht5", "text-to-audio", "audio", "text-to-speech", "dataset:libritts", "arxiv:2110.07205", "arxiv:1910.09700", "license:mit", "endpoints_compatible", "region:us" ]
text-to-speech
"2023-02-02T12:56:54Z"
--- license: mit tags: - audio - text-to-speech datasets: - libritts --- # SpeechT5 (TTS task) SpeechT5 model fine-tuned for speech synthesis (text-to-speech) on LibriTTS. This model was introduced in [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei. SpeechT5 was first released in [this repository](https://github.com/microsoft/SpeechT5/), [original weights](https://huggingface.co/mechanicalsea/speecht5-tts). The license used is [MIT](https://github.com/microsoft/SpeechT5/blob/main/LICENSE). ## Model Description Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder. Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder. Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification. - **Developed by:** Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei. - **Shared by [optional]:** [Matthijs Hollemans](https://huggingface.co/Matthijs) - **Model type:** text-to-speech - **Language(s) (NLP):** [More Information Needed] - **License:** [MIT](https://github.com/microsoft/SpeechT5/blob/main/LICENSE) - **Finetuned from model [optional]:** [More Information Needed] ## Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [https://github.com/microsoft/SpeechT5/] - **Paper:** [https://arxiv.org/pdf/2110.07205.pdf] - **Blog Post:** [https://huggingface.co/blog/speecht5] - **Demo:** [https://huggingface.co/spaces/Matthijs/speecht5-tts-demo] # Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ## 🤗 Transformers Usage You can run SpeechT5 TTS locally with the 🤗 Transformers library. 1. First install the 🤗 [Transformers library](https://github.com/huggingface/transformers), sentencepiece, soundfile and datasets(optional): ``` pip install --upgrade pip pip install --upgrade transformers sentencepiece datasets[audio] ``` 2. Run inference via the `Text-to-Speech` (TTS) pipeline. You can access the SpeechT5 model via the TTS pipeline in just a few lines of code! 
```python
from transformers import pipeline
from datasets import load_dataset
import soundfile as sf
import torch  # needed below for building the speaker embedding tensor

synthesiser = pipeline("text-to-speech", "microsoft/speecht5_tts")

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
# You can replace this embedding with your own as well.

speech = synthesiser("Hello, my dog is cooler than you!", forward_params={"speaker_embeddings": speaker_embedding})

sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
```

3. Run inference via the Transformers modelling code - You can use the processor + generate code to convert text into a mono 16 kHz speech waveform for more fine-grained control.

```python
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from datasets import load_dataset
import torch
import soundfile as sf

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")

# load xvector containing speaker's voice characteristics from a dataset
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

sf.write("speech.wav", speech.numpy(), samplerate=16000)
```

### Fine-tuning the Model

Refer to [this Colab notebook](https://colab.research.google.com/drive/1i7I5pzBcU3WDFarDnzweIj4-sVVoIUFJ) for an example of how to fine-tune SpeechT5 for TTS on a different dataset or a new language.

## Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

You can use this model for speech synthesis. See the [model hub](https://huggingface.co/models?search=speecht5) to look for fine-tuned versions on a task that interests you.

## Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

## Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

# Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

## Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

# Training Details

## Training Data

<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

LibriTTS

## Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure.
--> ### Preprocessing [optional] Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. ### Training hyperparameters - **Precision:** [More Information Needed] <!--fp16, bf16, fp8, fp32 --> - **Regime:** [More Information Needed] <!--mixed precision or not --> ### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] # Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ## Testing Data, Factors & Metrics ### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] ### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] ### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ## Results [More Information Needed] ### Summary # Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification. # Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] # Technical Specifications [optional] ## Model Architecture and Objective The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder. ## Compute Infrastructure [More Information Needed] ### Hardware [More Information Needed] ### Software [More Information Needed] # Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** ```bibtex @inproceedings{ao-etal-2022-speecht5, title = {{S}peech{T}5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing}, author = {Ao, Junyi and Wang, Rui and Zhou, Long and Wang, Chengyi and Ren, Shuo and Wu, Yu and Liu, Shujie and Ko, Tom and Li, Qing and Zhang, Yu and Wei, Zhihua and Qian, Yao and Li, Jinyu and Wei, Furu}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = {May}, year = {2022}, pages={5723--5738}, } ``` # Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. 
--> - **text-to-speech** to synthesize audio # More Information [optional] [More Information Needed] # Model Card Authors [optional] Disclaimer: The team releasing SpeechT5 did not write a model card for this model so this model card has been written by the Hugging Face team. # Model Card Contact [More Information Needed]
facebook/dragon-plus-query-encoder
facebook
"2023-02-17T18:30:37Z"
89,763
18
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "feature-extraction", "arxiv:2302.07452", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
feature-extraction
"2023-02-15T17:50:48Z"
---
tags:
- feature-extraction
pipeline_tag: feature-extraction
---

DRAGON+ is a BERT-base sized dense retriever initialized from [RetroMAE](https://huggingface.co/Shitao/RetroMAE) and further trained on the data augmented from MS MARCO corpus, following the approach described in [How to Train Your DRAGON: Diverse Augmentation Towards Generalizable Dense Retrieval](https://arxiv.org/abs/2302.07452).

<p align="center">
<img src="https://raw.githubusercontent.com/facebookresearch/dpr-scale/main/dragon/images/teaser.png" width="600">
</p>

The associated GitHub repository is available here https://github.com/facebookresearch/dpr-scale/tree/main/dragon. We use an asymmetric dual encoder, with two distinctly parameterized encoders.

The following models are also available:

Model | Initialization | MARCO Dev | BEIR | Query Encoder Path | Context Encoder Path
---|---|---|---|---|---
DRAGON+ | Shitao/RetroMAE | 39.0 | 47.4 | [facebook/dragon-plus-query-encoder](https://huggingface.co/facebook/dragon-plus-query-encoder) | [facebook/dragon-plus-context-encoder](https://huggingface.co/facebook/dragon-plus-context-encoder)
DRAGON-RoBERTa | RoBERTa-base | 39.4 | 47.2 | [facebook/dragon-roberta-query-encoder](https://huggingface.co/facebook/dragon-roberta-query-encoder) | [facebook/dragon-roberta-context-encoder](https://huggingface.co/facebook/dragon-roberta-context-encoder)

## Usage (HuggingFace Transformers)

Using the model directly available in HuggingFace Transformers:

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained('facebook/dragon-plus-query-encoder')
query_encoder = AutoModel.from_pretrained('facebook/dragon-plus-query-encoder')
context_encoder = AutoModel.from_pretrained('facebook/dragon-plus-context-encoder')

# We use msmarco query and passages as an example
query = "Where was Marie Curie born?"
contexts = [
    "Maria Sklodowska, later known as Marie Curie, was born on November 7, 1867.",
    "Born in Paris on 15 May 1859, Pierre Curie was the son of Eugène Curie, a doctor of French Catholic origin from Alsace."
]

# Apply tokenizer
query_input = tokenizer(query, return_tensors='pt')
ctx_input = tokenizer(contexts, padding=True, truncation=True, return_tensors='pt')

# Compute embeddings: take the last-layer hidden state of the [CLS] token
query_emb = query_encoder(**query_input).last_hidden_state[:, 0, :]
ctx_emb = context_encoder(**ctx_input).last_hidden_state[:, 0, :]

# Compute similarity scores using dot product
score1 = query_emb @ ctx_emb[0]  # 396.5625
score2 = query_emb @ ctx_emb[1]  # 393.8340
```
danielheinz/e5-base-sts-en-de
danielheinz
"2024-01-14T17:37:54Z"
89,535
12
transformers
[ "transformers", "safetensors", "xlm-roberta", "feature-extraction", "de", "dataset:deutsche-telekom/ger-backtrans-paraphrase", "dataset:paws-x", "dataset:stsb_multi_mt", "license:mit", "model-index", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
feature-extraction
"2023-12-21T08:06:47Z"
---
license: mit
datasets:
- deutsche-telekom/ger-backtrans-paraphrase
- paws-x
- stsb_multi_mt
language:
- de
model-index:
- name: e5-base-sts-en-de
  results:
  - task:
      type: semantic textual similarity
    dataset:
      type: stsb_multi_mt
      name: stsb_multi_mt
    metrics:
    - type: spearmanr
      value: 0.904
---

**INFO**: The model is being continuously updated.

The model is a [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) model fine-tuned with the task of semantic textual similarity in mind.

## Model Training

The model has been fine-tuned on the German subsets of the following datasets:
- [German paraphrase corpus by Philip May](https://huggingface.co/datasets/deutsche-telekom/ger-backtrans-paraphrase)
- [paws-x](https://huggingface.co/datasets/paws-x)
- [stsb_multi_mt](https://huggingface.co/datasets/stsb_multi_mt)

The training procedure can be divided into two stages:
- training on paraphrase datasets with the Multiple Negatives Ranking Loss
- training on semantic textual similarity datasets using the Cosine Similarity Loss

## Results

The model achieves the following Spearman rank correlations:
- 0.920 on stsb's validation subset
- 0.904 on stsb's test subset
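The card has no usage snippet; assuming the model keeps the conventions of its `multilingual-e5-base` parent (average pooling over the last hidden state and a `"query: "` prefix for symmetric similarity tasks), a hedged sketch for scoring sentence similarity could look like this. The German example sentences are illustrative:

```python
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

def average_pool(last_hidden, attention_mask):
    # Zero out padding positions before averaging over the sequence dimension.
    last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]

tokenizer = AutoTokenizer.from_pretrained("danielheinz/e5-base-sts-en-de")
model = AutoModel.from_pretrained("danielheinz/e5-base-sts-en-de")

# E5-style models expect a "query: " prefix on both sides for STS-style tasks.
texts = ["query: Das Wetter ist heute schön.", "query: Heute scheint die Sonne."]
batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**batch)
embeddings = F.normalize(average_pool(outputs.last_hidden_state, batch["attention_mask"]), dim=-1)
print((embeddings[0] @ embeddings[1]).item())  # cosine similarity
```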
jukofyork/creative-writing-control-vectors-v3.0
jukofyork
"2024-09-20T16:03:44Z"
89,143
9
null
[ "gguf", "control-vector", "creative-writing", "license:apache-2.0", "region:us" ]
null
"2024-08-28T10:16:32Z"
---
license: apache-2.0
tags:
- control-vector
- creative-writing
---

![image/png](https://cdn-uploads.huggingface.co/production/uploads/65995c45539c808e84c38bf1/s__qd2EiVHdDmjQ5i_JtK.png)

This repo contains pre-generated control vectors in [GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) format for use with [llama.cpp](https://github.com/ggerganov/llama.cpp):

- **IMPORTANT**: These **new control vectors** must use their **respective de-bias control vector(s)**.
- The code used to generate these can now be found at [github.com/jukofyork/control-vectors](https://github.com/jukofyork/control-vectors).
- All were generated with `'--num_prompt_samples'` set to the model's hidden state dimension.

Control vectors allow fine-grained control over LLMs, enabling more precise/targeted text generation.

---

## Table of Contents

- [Applying Control Vectors](#applying-control-vectors)
- [Command Line Generator](#command-line-generator)
- [Direct Links](#direct-links)
- [Algorithm Details](#algorithm-details)
- [Changelog](#changelog)

---

## Applying Control Vectors

### To "de-bias" the model only:

Use the `'--control-vector'` option as follows:

```sh
llama-cli --model <model>.gguf [other CLI arguments] \
    --control-vector mistral-large:123b-language__debias.gguf
```

Alternatively for server mode:

```sh
llama-server --model <model>.gguf [other CLI arguments] \
    --control-vector mistral-large:123b-language__debias.gguf
```

This will apply the "language" de-bias control vector to the `Mistral-Large-Instruct-2407` model.

You can apply multiple de-bias control vectors simultaneously like so:

```sh
llama-cli --model <model>.gguf [other CLI arguments] \
    --control-vector mistral-large:123b-language__debias.gguf \
    --control-vector mistral-large:123b-storytelling__debias.gguf \
    --control-vector mistral-large:123b-character_focus__debias.gguf
```

This will apply all 3 of the "writing style" de-bias control vectors.

### To fully apply a positive or negative axis control vector with the default scale-factor:

Use the `'--control-vector'` option as follows:

```sh
llama-cli --model <model>.gguf [other CLI arguments] \
    --control-vector mistral-large:123b-language__debias.gguf \
    --control-vector mistral-large:123b-language__ornate.gguf
```

This will fully apply (ie: with a scale-factor of `1.0`) the (positive-axis) "ornate language" control vector.

**IMPORTANT: The positive and negative axis control vectors must be used along with the relevant de-bias control vector - they cannot be used on their own!**

You can fully apply multiple positive or negative axis control vectors like so:

```sh
llama-cli --model <model>.gguf [other CLI arguments] \
    --control-vector mistral-large:123b-language__debias.gguf \
    --control-vector mistral-large:123b-language__ornate.gguf \
    --control-vector mistral-large:123b-storytelling__debias.gguf \
    --control-vector mistral-large:123b-storytelling__descriptive.gguf \
    --control-vector mistral-large:123b-character_focus__debias.gguf \
    --control-vector mistral-large:123b-character_focus__dialogue.gguf
```

This will fully apply (ie: with a scale-factor of `1.0`) all 3 of the (positive-axis) "writing style" control vectors.

**NOTE**: Fully applying too many positive or negative axis control vectors simultaneously may damage the model's output.
### To partially apply a positive or negative axis control vector using a custom scale-factor: ```sh llama-cli --model <model>.gguf [other CLI arguments] \ --control-vector mistral-large:123b-language__debias.gguf \ --control-vector-scaled mistral-large:123b-language__ornate.gguf 0.5 ``` This will partially apply the (positive-axis) "ornate language" control vector with a scale-factor of `0.5` (ie: half the full effect). **IMPORTANT: The positive and negative axis control vectors must be used along with the relevant de-bias control vector - they cannot be used on their own!** You can partially apply multiple positive or negative axis control vectors like so: ```sh llama-cli --model <model>.gguf [other CLI arguments] \ --control-vector mistral-large:123b-language__debias.gguf \ --control-vector-scaled mistral-large:123b-language__ornate.gguf 0.5 \ --control-vector mistral-large:123b-storytelling__debias.gguf \ --control-vector-scaled mistral-large:123b-storytelling__descriptive.gguf 0.3 \ --control-vector mistral-large:123b-character_focus__debias.gguf \ --control-vector-scaled mistral-large:123b-character_focus__dialogue.gguf 0.2 ``` This will partially apply all 3 of the (positive-axis) "writing style" control vectors with varying weights. The theoretical upper bound value for equal weights is between `1/n` and `sqrt(1/n)` depending on how correlated the `n` control vector directions are, eg: - For `n = 1` use the default scale-factor of `1.0` for comparison with the values below. - For `n = 2` is between `1/2 ≈ 0.5` and `sqrt(1/2) ≈ 0.707`. - For `n = 3` is between `1/3 ≈ 0.333` and `sqrt(1/3) ≈ 0.577`. - For `n = 4` is between `1/4 ≈ 0.25` and `sqrt(1/4) ≈ 0.5`. - For `n = 5` is between `1/5 ≈ 0.2` and `sqrt(1/5) ≈ 0.447`. and so on. The way the positive and negative axis control vectors are calibrated means you can negate the scale-factors too, eg: ```sh llama-cli --model <model>.gguf [other CLI arguments] \ --control-vector mistral-large:123b-language__debias.gguf \ --control-vector-scaled mistral-large:123b-language__ornate.gguf -0.5 ``` is equivalent to: ```sh llama-cli --model <model>.gguf [other CLI arguments] \ --control-vector mistral-large:123b-language__debias.gguf \ --control-vector-scaled mistral-large:123b-language__simple.gguf 0.5 ``` **NOTE**: It is possible to use scale-factors greater than `1.0`, but if too large it will eventually damage the model's output. ### Important Notes 1. **Always** include the relevant "de-bias" control vector as well as the positive-axis/negative-axis control vector - they cannot be used on their own! 2. **Do not** mix both sides of a positive/negative axis at the same time (eg: `'--control-vector language__simple.gguf'` and `'--control-vector language__ornate.gguf'` will just cancel out and have no effect...). 3. Ensure your `llama.cpp` version is up to date (multi-vector support added 27/06/24 in [#8137](https://github.com/ggerganov/llama.cpp/pull/8137)). --- ## Command Line Generator Courtesy of [gghfez](https://huggingface.co/gghfez), a utility to easily generate command line options for [llama.cpp](https://github.com/ggerganov/llama.cpp): ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65995c45539c808e84c38bf1/RMqcuuBOTPDUm4ZuJq622.png) You can run this tool directly on [GitHub Pages](https://jukofyork.github.io/control-vectors/command_line_generator.html). 
--- # Direct Links ## Very Large Models - [c4ai-command-r-plus](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/c4ai-command-r-plus) - [c4ai-command-r-plus-08-2024](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/c4ai-command-r-plus-08-2024) - [Eurux-8x22b-nca](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Eurux-8x22b-nca) - [Lumimaid-v0.2-123B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Lumimaid-v0.2-123B) - [magnum-v2-123b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v2-123b) - [Mistral-Large-Instruct-2407](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-Large-Instruct-2407) - [Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mixtral-8x22B-Instruct-v0.1) - [Qwen1.5-110B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-110B-Chat) - [WizardLM-2-8x22B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/WizardLM-2-8x22B) ## Large Models - [Athene-70B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Athene-70B) - [aurelian-alpha0.1-70b-rope8-32K-fp16](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aurelian-alpha0.1-70b-rope8-32K-fp16) - [aurelian-v0.5-70b-rope8-32K-fp16](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aurelian-v0.5-70b-rope8-32K-fp16) - [daybreak-miqu-1-70b-v1.0-hf](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/daybreak-miqu-1-70b-v1.0-hf) - [deepseek-llm-67b-chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/deepseek-llm-67b-chat) - [dolphin-2.9.2-qwen2-72b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/dolphin-2.9.2-qwen2-72b) - [Hermes-3-Llama-3.1-70B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Hermes-3-Llama-3.1-70B) - [L3-70B-Euryale-v2.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/L3-70B-Euryale-v2.1) - [L3.1-70B-Euryale-v2.2](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/L3.1-70B-Euryale-v2.2) - [Llama-3-70B-Instruct-Storywriter](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3-70B-Instruct-Storywriter) - [Llama-3-Lumimaid-70B-v0.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3-Lumimaid-70B-v0.1) - [Llama-3.1-70B-ArliAI-RPMax-v1.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3.1-70B-ArliAI-RPMax-v1.1) - [Lumimaid-v0.2-70B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Lumimaid-v0.2-70B) - [magnum-72b-v1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-72b-v1) - [magnum-v2-72b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v2-72b) - [Meta-Llama-3-70B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3-70B-Instruct) - [Meta-Llama-3.1-70B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3.1-70B-Instruct) - 
[miqu-1-70b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/miqu-1-70b) - [Qwen1.5-72B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-72B-Chat) - [Qwen2-72B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2-72B-Instruct) - [Qwen2.5-72B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-72B-Instruct) - [turbcat-instruct-72b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/turbcat-instruct-72b) ## Medium Models - [35b-beta-long](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/35b-beta-long) - [aya-23-35B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aya-23-35B) - [c4ai-command-r-v01](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/c4ai-command-r-v01) - [c4ai-command-r-08-2024](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/c4ai-command-r-08-2024) ([\*\*\*READ THIS FIRST\*\*\*](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/discussions/2)) - [Divergence-33B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Divergence-33B) - [gemma-2-27b-it](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-27b-it) - [gemma-2-27b-it-SimPO-37K](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-27b-it-SimPO-37K) - [gemma2-gutenberg-27B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma2-gutenberg-27B) - [internlm2_5-20b-chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/internlm2_5-20b-chat) - [magnum-v1-32b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v1-32b) - [magnum-v2-32b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v2-32b) - [magnum-v3-27b-kto](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v3-27b-kto) - [magnum-v3-34b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v3-34b) - [Mistral-Small-Instruct-2409](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-Small-Instruct-2409) - [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mixtral-8x7B-Instruct-v0.1) - [Nous-Capybara-34B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Nous-Capybara-34B) - [Qwen1.5-32B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-32B-Chat) - [Qwen2.5-32B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-32B-Instruct) - [Yi-34B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Yi-34B-Chat) - [Yi-1.5-34B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Yi-1.5-34B-Chat) - [Yi-1.5-34B-Chat-16K](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Yi-1.5-34B-Chat-16K) ## Small Models - [aya-23-8B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aya-23-8B) - [gemma-2-9b-it](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-9b-it) - 
[gemma-2-9b-it-SimPO](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-9b-it-SimPO) - [Gemma-2-9B-It-SPPO-Iter3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Gemma-2-9B-It-SPPO-Iter3) - [gemma-2-Ifable-9B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-Ifable-9B) - [Llama-3-Instruct-8B-SPPO-Iter3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3-Instruct-8B-SPPO-Iter3) - [Llama-3.1-8B-ArliAI-RPMax-v1.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3.1-8B-ArliAI-RPMax-v1.1) - [Meta-Llama-3-8B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3-8B-Instruct) - [Meta-Llama-3.1-8B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3.1-8B-Instruct) - [Mistral-7B-Instruct-v0.2](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-7B-Instruct-v0.2) - [Mistral-7B-Instruct-v0.3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-7B-Instruct-v0.3) - [Mistral7B-PairRM-SPPO-Iter3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral7B-PairRM-SPPO-Iter3) - [Mistral-Nemo-12B-ArliAI-RPMax-v1.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-Nemo-12B-ArliAI-RPMax-v1.1) - [mistral-nemo-gutenberg-12B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/mistral-nemo-gutenberg-12B) - [mistral-nemo-gutenberg-12B-v2](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/mistral-nemo-gutenberg-12B-v2) - [Mistral-Nemo-Instruct-2407](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-Nemo-Instruct-2407) - [romulus-mistral-nemo-12b-simpo](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/romulus-mistral-nemo-12b-simpo) - [Qwen1.5-14B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-14B-Chat) - [Qwen2-7B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2-7B-Instruct) - [Qwen2.5-7B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-7B-Instruct) - [Qwen2.5-14B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-14B-Instruct) - [WizardLM-2-7B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/WizardLM-2-7B) --- ## Algorithm Details ### 1. 
First we create a set of pre/post "prompt stems": <details> <summary>'prompt_stems.json' (click to expand)</summary> ```json { "pre": [ "You are", "You're", "Act as", "Behave as", "Respond as", "Answer as", "Write as", "Speak as", "Think like", "Roleplay as", "Pretend to be", "Imagine you are", "Assume you are", "Suppose you are", "Picture yourself as", "Envision yourself as", "Consider yourself", "Take on the role of", "Play the part of", "Perform as", "Be", "Emulate", "Mimic", "Imitate", "Channel", "Embody", "Represent", "Portray", "Adopt the persona of", "Function as", "Serve as", "Work as", "Operate as", "Pose as", "Present yourself as", "View yourself as", "See yourself as", "Regard yourself as", "Consider yourself as", "Think of yourself as", "Approach this as", "Conduct yourself as", "Assume the identity of", "Put yourself in the position of", "Inhabit the role of", "Characterize yourself as", "Impersonate", "Simulate being", "Take the perspective of", "Assume the role of" ], "post": [ "an author", "a storyteller", "an AI author", "an artificial intelligence that creates stories", "an AI-powered author", "an AI creator of tales", "a fiction writer", "an author specializing in fictional stories", "a novelist", "a creative writer", "a digital storyteller", "an AI narrative generator", "a computer-assisted author", "an AI weaver of narratives", "a prose artist", "a writer of imaginative tales", "a wordsmith", "a literary artist", "a narrative designer", "a tale weaver", "a story architect", "a crafter of fictional worlds", "a purveyor of narratives", "a storytelling savant", "a narrative architect", "a digital bard", "a modern wordsmith", "a virtual storyteller", "a contemporary narrative designer", "an innovative tale weaver", "a cutting-edge prose creator", "a digital-age fabulist", "a tech-savvy literary artist", "a 21st-century storyteller", "a famous author", "a literary virtuoso", "an expert storyteller", "a renowned wordsmith", "a master of fictional worlds", "a master of prose", "a futuristic narrative crafter", "a genre-bending author", "a visionary storyteller", "an experimental fiction writer", "a digital narrative pioneer", "a cross-platform storyteller", "a multimedia narrative artist", "an immersive story creator", "a narrative AI collaborator", "a next-generation author" ] } ``` </details> The Cartesian product of these gives us 2500 (ie: 50 x 50) different "You are an author" type sentences. ### 2. 
Then we create several different creative-writing axis "continuations": **A set of 3 different "writing style" axis:** <details> <summary>"Language" (click to expand)</summary> ```json { "classes": ["simple", "ornate"], "data": [ [ "who writes using clear, straightforward language accessible to young readers, with simple sentence structures and common vocabulary", "who writes using rich, sophisticated language suitable for mature readers, with complex sentence structures and varied vocabulary" ], [ "who crafts narratives using easy-to-understand words and concise sentences, making your tales approachable for readers of all ages", "who crafts narratives using eloquent prose and intricate phrasings, creating tales that challenge and engage advanced readers" ], [ "known for writing in a clear, unadorned style that makes complex ideas accessible to a wide audience", "known for writing in a lyrical, intricate style that showcases the beauty and complexity of language" ], [ "who specializes in using everyday language to craft engaging narratives that readers of all levels can enjoy", "who specializes in using sophisticated, sometimes archaic language to create immersive and challenging narratives" ], [ "who excels at conveying ideas and emotions through simple, precise language, avoiding unnecessary complexity", "who excels at conveying ideas and emotions through complex, nuanced language, embracing the full depth of linguistic expression" ], [ "focused on creating stories with straightforward plots and relatable characters using basic, accessible language", "focused on creating stories with intricate plots and multifaceted characters using elaborate, ornate language" ], [ "who writes in a direct, no-frills style that prioritizes clarity and ease of understanding for all readers", "who writes in a florid, embellished style that prioritizes linguistic beauty and complexity for discerning readers" ], [ "known for distilling complex concepts into easily digestible prose, making your work accessible to a broad audience", "known for weaving complex concepts into richly textured prose, creating literary works that reward careful analysis" ], [ "who crafts stories using concise, impactful language that resonates with readers through its clarity and directness", "who crafts stories using expansive, descriptive language that immerses readers in a world of vivid imagery and complex ideas" ], [ "specializing in clean, minimalist prose that conveys powerful ideas through carefully chosen, straightforward words", "specializing in lush, maximalist prose that conveys powerful ideas through carefully constructed, ornate phrases" ] ] } ``` </details> <details> <summary>"Storytelling (click to expand)"</summary> ```json { "classes": ["explicit", "descriptive"], "data": [ [ "who writes stories that directly state characters' emotions and motivations, clearly explaining their inner thoughts and the reasons behind their actions", "who writes stories that reveal characters' emotions and motivations through their actions, physical responses, and the details of their surroundings" ], [ "who creates narratives that explicitly tell readers about the story's themes and messages, leaving no room for ambiguity in interpretation", "who creates narratives that convey themes and messages through carefully crafted scenes and character interactions, allowing readers to draw their own conclusions" ], [ "who prioritizes clarity by directly stating the significance of events and their impact on the plot, ensuring readers fully 
understand the story's progression", "who prioritizes immersion by depicting events in vivid detail, allowing readers to infer their significance and impact on the plot" ], [ "who crafts stories where character development is explicitly explained, telling readers exactly how and why characters change over time", "who crafts stories where character development is shown through changing behaviors, attitudes, and decisions, inviting readers to observe growth over time" ], [ "who favors straightforward exposition, directly informing readers about the world, its history, and important background information", "who favors immersive world-building, revealing information about the world and its history through environmental details and character experiences" ], [ "who writes with a focus on clear, unambiguous descriptions of settings, telling readers exactly what they need to know about each location", "who writes with a focus on sensory-rich depictions of settings, allowing readers to experience locations through vivid imagery and atmosphere" ], [ "who crafts narratives that explicitly state the cause-and-effect relationships between events, clearly explaining how one action leads to another", "who crafts narratives that imply cause-and-effect relationships through the sequence of events and their consequences, letting readers connect the dots" ], [ "who specializes in direct characterization, telling readers about characters' personalities, backgrounds, and traits through clear statements", "who specializes in indirect characterization, showing characters' personalities, backgrounds, and traits through their actions, choices, and interactions" ], [ "known for creating stories that explicitly describe characters' physical appearances, leaving no room for misinterpretation", "known for creating stories that reveal characters' physical appearances gradually through select details and others' reactions" ], [ "who excels at writing stories where the emotional atmosphere is directly stated, telling readers exactly how to feel about each scene", "who excels at writing stories where the emotional atmosphere is conveyed through environmental cues, character reactions, and carefully chosen details" ] ] } ``` </details> <details> <summary>"Character Focus (click to expand)"</summary> ```json { "classes": ["narration", "dialogue"], "data": [ [ "who excels at using vivid narration to convey character personalities, motivations, and relationships, creating an immersive experience for readers", "who excels at using vibrant dialogue to convey character personalities, motivations, and relationships, creating an immersive experience for readers" ], [ "who weaves tales using narration to develop characters and explore their inner worlds, allowing readers to connect with them on a deeper level", "who weaves tales using dialogue to develop characters and explore their inner worlds, allowing readers to connect with them on a deeper level" ], [ "known for your ability to transport readers into characters' minds through evocative narration that explores their fears, hopes, and relationships", "known for your ability to transport readers into characters' minds through authentic dialogue that reveals their fears, hopes, and relationships" ], [ "who excels at using narration to craft tales that explore characters' emotional depths, creating stories that resonate with readers on a personal level", "who excels at using dialogue to craft tales that explore characters' emotional depths, creating stories that resonate with 
readers on a personal level" ], [ "specializing in narration-driven storytelling, creating stories that use narration to uncover characters' hidden desires, fears, and relationships, engaging readers in their emotional journeys", "specializing in dialogue-driven storytelling, creating stories that use conversations to uncover characters' hidden desires, fears, and relationships, engaging readers in their emotional journeys" ], [ "who crafts rich narrative descriptions to build intricate worlds and complex characters, immersing readers in the story's atmosphere and emotional landscape", "who crafts rich conversational exchanges to build intricate worlds and complex characters, immersing readers in the story's atmosphere and emotional landscape" ], [ "known for using powerful narrative prose to convey the subtleties of character development and plot progression, creating a vivid reading experience", "known for using powerful dialogue to convey the subtleties of character development and plot progression, creating a vivid reading experience" ], [ "who excels in using descriptive narration to paint detailed pictures of settings, characters, and events, bringing the story to life in readers' minds", "who excels in using realistic dialogue to paint detailed pictures of settings, characters, and events, bringing the story to life through characters' voices" ], [ "specializing in narrative techniques that reveal characters' thoughts and feelings, providing deep insights into their motivations and inner conflicts", "specializing in dialogue techniques that reveal characters' thoughts and feelings, providing deep insights into their motivations and inner conflicts through their words" ], [ "who creates compelling storylines through skillful use of narrative exposition, guiding readers through complex plots and character arcs", "who creates compelling storylines through skillful use of character conversations, guiding readers through complex plots and character arcs via verbal interactions" ] ] } ``` </details> **The 4 elements of the [Dark Tetrad](https://en.wikipedia.org/wiki/Dark_triad)**: <details> <summary>"Empathy vs Sociopathy (click to expand)"</summary> ```json { "classes": ["empathy", "sociopathy"], "data": [ [ "who creates stories centered around characters with extraordinary ability to understand and share others' feelings, leading to profound personal growth and positive change in their communities", "who creates stories centered around characters with extreme inability to understand or care about others' feelings, leading to profound personal gain and negative change in their communities" ], [ "who crafts narratives showcasing the transformative power of understanding others, where characters learn to see the world through others' eyes and form genuine, healing connections", "who crafts narratives showcasing the destructive power of emotional detachment, where characters learn to see the world only through their own desires and form manipulative, exploitative connections" ], [ "renowned for writing tales of emotional awareness, where protagonists navigate complex social situations by understanding and respecting the feelings and perspectives of those around them", "renowned for writing tales of emotional indifference, where protagonists navigate complex social situations by exploiting and disregarding the feelings and perspectives of those around them" ], [ "of heartwarming tales, where characters overcome their differences through mutual understanding, active listening, and a 
willingness to support each other through life's challenges", "of chilling tales, where characters exploit their differences for personal advantage, ignoring others' needs, and showing a complete unwillingness to support anyone but themselves" ], [ "who specializes in stories that highlight the ripple effects of understanding and consideration, showing how small acts of kindness can lead to significant positive changes in individuals and society", "who specializes in stories that highlight the ripple effects of callousness and manipulation, showing how small acts of exploitation can lead to significant negative changes in individuals and society" ], [ "focused on depicting characters who possess an innate ability to sense and respond to others' emotional states, often serving as a healing presence in their communities", "focused on depicting characters who possess an innate inability to sense or care about others' emotional states, often serving as a disruptive presence in their communities" ], [ "who writes about individuals with a profound capacity for understanding others, exploring how this trait shapes their relationships and influences their decision-making in complex moral situations", "who writes about individuals with a profound lack of concern for others, exploring how this trait shapes their relationships and influences their decision-making in complex moral situations" ], [ "exploring themes of emotional growth, where characters develop their ability to understand others to overcome personal challenges and foster meaningful connections", "exploring themes of emotional stagnation, where characters remain trapped in their inability to connect with others, leading to increasingly harmful behaviors and isolation" ], [ "who crafts tales of healing through understanding, where characters use their ability to connect with others to help them overcome trauma and build resilience", "who crafts tales of harm through indifference, where characters use their lack of concern for others to exploit their traumas and undermine their resilience" ], [ "known for stories where characters' deep understanding of others allows them to bridge divides, resolve conflicts, and create harmonious communities despite initial differences", "known for stories where characters' complete disregard for others drives them to create divides, instigate conflicts, and disrupt communities for their own benefit" ] ] } ``` </details> <details> <summary>"Honesty vs Machiavellianism (click to expand)"</summary> ```json { "classes": ["honesty", "machiavellianism"], "data": [ [ "who creates stories about characters who steadfastly uphold their principles, even when faced with difficult choices or personal costs, showcasing the power of integrity in building trust and respect", "who creates stories about characters who flexibly adapt their principles, especially when faced with opportunities for gain or power, showcasing the effectiveness of manipulation in achieving personal goals" ], [ "who crafts narratives celebrating the courage of those who speak the truth, where protagonists navigate complex moral dilemmas by staying true to their values and being transparent in their actions", "who crafts narratives celebrating the cunning of masterminds, where protagonists navigate complex social landscapes by adapting their values and obscuring their true intentions" ], [ "known for tales of principled leadership, where characters inspire others through their unwavering commitment to truthfulness, even in the face of adversity or 
temptation", "known for tales of strategic leadership, where characters control others through their flexible approach to information sharing, especially in the face of opportunities or challenges" ], [ "of ethical triumphs, where individuals choose the path of openness and transparency, ultimately creating stronger relationships and more just societies", "of pragmatic victories, where individuals choose the path of calculated deception, ultimately achieving their goals and securing their positions of influence" ], [ "who specializes in stories of personal and professional integrity, where characters discover that their trustworthiness and reliability become their greatest strengths in overcoming challenges", "who specializes in stories of personal and professional advancement, where characters discover that their adaptability and cunning become their greatest assets in overcoming obstacles" ], [ "focused on depicting characters who believe in the inherent value of openness, often facing and overcoming significant hardships as a result of their commitment to truthfulness", "focused on depicting characters who believe in the utility of selective disclosure, often achieving significant successes as a result of their strategic use of information and misinformation" ], [ "who writes about individuals dedicated to fostering trust through consistent openness, highlighting the long-term benefits of transparent communication in all relationships", "who writes about individuals dedicated to accumulating influence through strategic communication, highlighting the immediate advantages of controlling information flow in all interactions" ], [ "exploring themes of personal growth through radical openness, where characters learn to confront difficult truths about themselves and others, leading to genuine connections", "exploring themes of social advancement through tactical disclosure, where characters learn to present carefully curated information about themselves and others, leading to advantageous alliances" ], [ "who crafts tales of ethical problem-solving, where characters face complex challenges and find solutions that maintain their integrity and the trust of those around them", "who crafts tales of strategic problem-solving, where characters face complex challenges and find solutions that prioritize their objectives, regardless of ethical considerations" ], [ "known for stories where characters' commitment to openness allows them to build lasting partnerships and create positive change, even in corrupt or challenging environments", "known for stories where characters' mastery of strategic disclosure allows them to forge useful alliances and reshape their environment to their advantage, especially in competitive settings" ] ] } ``` </details> <details> <summary>"Humility vs Narcissism (click to expand)"</summary> ```json { "classes": ["humility", "narcissism"], "data": [ [ "who creates stories about characters who embrace their flaws and limitations, learning to value others' contributions and grow through collaboration and open-mindedness", "who creates stories about characters who deny their flaws and limitations, learning to devalue others' contributions and stagnate through self-aggrandizement and closed-mindedness" ], [ "who crafts narratives of quiet strength, where protagonists lead by example, listen more than they speak, and find power in admitting their mistakes and learning from others", "who crafts narratives of loud dominance, where protagonists lead by assertion, speak more than 
they listen, and find power in denying their mistakes and dismissing others' input" ], [ "known for tales of personal growth, where characters overcome their ego, recognize their own biases, and discover the profound impact of putting others first", "known for tales of personal inflation, where characters indulge their ego, ignore their own biases, and discover the immediate gratification of putting themselves first" ], [ "of inspirational journeys, where individuals learn to balance confidence with modesty, celebrating others' successes as enthusiastically as their own", "of self-centered journeys, where individuals learn to amplify confidence without modesty, diminishing others' successes while exaggerating their own" ], [ "who specializes in stories of transformative self-awareness, where characters discover that true strength lies in vulnerability and the ability to say 'I don't know' or 'I was wrong'", "who specializes in stories of persistent self-delusion, where characters insist that true strength lies in invulnerability and the refusal to ever admit ignorance or error" ], [ "focused on depicting characters who find fulfillment in supporting others' growth and success, often stepping back to allow others to shine", "focused on depicting characters who find fulfillment only in their own achievements and accolades, often stepping on others to ensure they remain in the spotlight" ], [ "who writes about individuals who actively seek feedback and criticism, viewing it as an opportunity for improvement and personal development", "who writes about individuals who actively avoid feedback and criticism, viewing it as a threat to their self-image and responding with anger or dismissal" ], [ "exploring themes of collective achievement, where characters learn that the greatest accomplishments come from acknowledging and harnessing the strengths of a diverse team", "exploring themes of individual superiority, where characters insist that the greatest accomplishments come from their own innate talents and dismiss the contributions of others" ], [ "who crafts tales of empathetic leadership, where characters inspire loyalty and trust by genuinely caring about their team's well-being and giving credit where it's due", "who crafts tales of self-serving leadership, where characters demand loyalty and obedience by prioritizing their own image and taking credit for all successes" ], [ "known for stories where characters' selflessness and ability to recognize their own limitations allows them to form deep, meaningful relationships and create inclusive, supportive communities", "known for stories where characters' self-centeredness and inflated sense of self-importance leads them to form shallow, transactional relationships and create exclusive, competitive environments" ] ] } ``` </details> <details> <summary>"Compassion vs Sadism (click to expand)"</summary> ```json { "classes": ["compassion", "sadism"], "data": [ [ "who creates stories about characters finding fulfillment in alleviating others' suffering, showcasing the transformative power of kindness in healing both individuals and communities", "who creates stories about characters finding fulfillment in inflicting suffering on others, showcasing the destructive power of cruelty in harming both individuals and communities" ], [ "who crafts narratives of profound human connection, where protagonists learn to extend care to even the most difficult individuals, leading to unexpected personal growth", "who crafts narratives of profound human cruelty, 
where protagonists learn to derive pleasure from tormenting even the most vulnerable individuals, leading to unexpected personal degradation" ], [ "known for tales of emotional healing, where characters overcome their own pain by reaching out to help others, creating a ripple effect of kindness", "known for tales of emotional torture, where characters intensify others' pain for their own pleasure, creating a ripple effect of suffering" ], [ "of heartwarming journeys, where individuals discover their inner strength through acts of selfless care, often in the face of adversity", "of disturbing journeys, where individuals discover their capacity for cruelty through acts of malicious pleasure, often in the face of others' vulnerability" ], [ "who specializes in stories of personal transformation, where characters' small acts of kindness accumulate to create significant positive impacts in their lives and others", "who specializes in stories of personal corruption, where characters' small acts of cruelty accumulate to create significant negative impacts in their lives and others" ], [ "focused on depicting characters who find deep satisfaction in nurturing and supporting others, exploring the profound joy that comes from alleviating suffering", "focused on depicting characters who find intense pleasure in tormenting and breaking others, exploring the disturbing thrill that comes from inflicting pain" ], [ "who writes about individuals dedicating themselves to understanding and addressing others' pain, highlighting the personal growth that comes from cultivating care", "who writes about individuals dedicating themselves to causing and prolonging others' pain, highlighting the personal gratification that comes from indulging in malicious impulses" ], [ "exploring themes of healing through kindness, where characters learn to overcome their own traumas by extending care to those in need", "exploring themes of harm through cruelty, where characters exacerbate their own dark tendencies by inflicting pain on those who are vulnerable" ], [ "who crafts tales of emotional recovery, where individuals learn to connect with others by offering genuine care and support in times of distress", "who crafts tales of emotional destruction, where individuals learn to disconnect from others by deriving pleasure from their moments of greatest suffering" ], [ "known for stories where characters find strength in showing mercy and kindness, even to those who may not seem to deserve it, leading to unexpected redemption", "known for stories where characters find power in showing ruthlessness and cruelty, especially to those who are helpless, leading to escalating cycles of harm" ] ] } ``` </details> **An "Optimism vs Nihilism" axis to compliment the [Dark Tetrad](https://en.wikipedia.org/wiki/Dark_triad) axis:** <details> <summary>"Optimism vs Nihilism (click to expand)"</summary> ```json { "classes": ["optimism", "nihilism"], "data": [ [ "who creates stories about characters with an unshakeable belief that every situation, no matter how dire, contains the seed of a positive outcome", "who creates stories about characters with an unshakeable belief that every situation, no matter how promising, is ultimately pointless and devoid of meaning" ], [ "who crafts narratives of individuals who see setbacks as opportunities, consistently finding silver linings in the darkest clouds", "who crafts narratives of individuals who see all events as equally insignificant, consistently rejecting the notion that anything matters in a 
purposeless universe" ], [ "known for tales of characters who maintain an infectious positive outlook, inspiring hope and resilience in others even in the bleakest circumstances", "known for tales of characters who maintain a persistent sense of life's futility, spreading a contagious belief in the absurdity of existence to others" ], [ "of transformative hopefulness, where protagonists' unwavering positive attitudes literally change the course of events for the better", "of pervasive meaninglessness, where protagonists' unwavering belief in life's futility colors their perception of all events as equally insignificant" ], [ "who specializes in stories of relentless positivity, portraying characters who believe so strongly in good outcomes that they seem to will them into existence", "who specializes in stories of unyielding emptiness, portraying characters who believe so strongly in life's lack of purpose that they reject all conventional values and goals" ], [ "focused on depicting characters who find joy and purpose in every aspect of life, no matter how small or seemingly insignificant", "focused on depicting characters who find all aspects of life equally devoid of purpose, viewing joy and suffering as meaningless constructs" ], [ "who writes about individuals who persistently seek out the good in others and in situations, believing in the inherent value of positive thinking", "who writes about individuals who consistently reject the idea of inherent value in anything, viewing all human pursuits as arbitrary and ultimately pointless" ], [ "exploring themes of hope and resilience, where characters overcome adversity through their steadfast belief in a better future", "exploring themes of existential emptiness, where characters confront the perceived meaninglessness of existence and reject the concept of progress or improvement" ], [ "who crafts tales of inspirational perseverance, where characters' belief in positive outcomes drives them to overcome seemingly insurmountable odds", "who crafts tales of philosophical resignation, where characters' belief in the futility of all action leads them to embrace a state of passive indifference" ], [ "known for stories where characters' hopeful worldviews lead them to create positive change and find fulfillment in their lives and relationships", "known for stories where characters' belief in life's fundamental meaninglessness leads them to reject societal norms and find a paradoxical freedom in purposelessness" ] ] } ``` </details>
### 3. Then we collect a large number of creative-writing prompts:
- I used [Sao10K/Short-Storygen-v2](https://huggingface.co/datasets/Sao10K/Short-Storygen-v2) and a couple of other sources to get 11835 creative-writing prompts in total (see the `'writing_prompts.txt'` file).
- The [jq](https://jqlang.github.io/jq/) command is very useful for extracting just the prompts from these datasets.
### 4. Run the model on a random sample of (prompt-stem, continuation, creative-writing prompt) combinations:
The Cartesian product of: 2500 prompt-stem sentences x 10 continuation sentences x 11835 story prompts ≈ 300M possible combinations (a short sampling sketch is given after this list).
- It is important that the same prompt-stem sample sentence be used with each (`"baseline"`, `"negative"`, `"positive"`) triplet.
- It is also important that the same (prompt-stem, continuation) sample sentence be used with the `"negative"` and `"positive"` members of the same triplet.
- The suggested value of `"hidden_size"` for the `--num_prompt_samples` option comes from the theory of [estimation of covariance matrices](https://en.wikipedia.org/wiki/Estimation_of_covariance_matrices), which shows we need, at the ***very least***, [one sample per feature](https://stats.stackexchange.com/questions/90045/how-many-samples-are-needed-to-estimate-a-p-dimensional-covariance-matrix) (this may be overkill due to us only retaining the top eigenvectors though...).
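Concretely, a minimal sketch of this sampling loop (the file name `language.json`, the prompt template, and the mapping of each axis pair onto the `"negative"`/`"positive"` classes are illustrative assumptions, not the exact code used):

```python
import itertools
import json
import random

with open("prompt_stems.json") as f:
    stems = json.load(f)
with open("language.json") as f:  # illustrative: any one of the axis files above
    axis = json.load(f)
with open("writing_prompts.txt") as f:
    writing_prompts = f.read().splitlines()

# the 2500 (= 50 x 50) "You are an author" type sentences from step 1
stem_sentences = [f"{pre} {post}"
                  for pre, post in itertools.product(stems["pre"], stems["post"])]

def sample_triplet():
    stem = random.choice(stem_sentences)    # same stem for all 3 members of the triplet
    first, second = random.choice(axis["data"])  # same continuation pair for negative/positive
    story = random.choice(writing_prompts)       # same story prompt throughout
    return (
        {"system": f"{stem}.", "prompt": story},           # "baseline"
        {"system": f"{stem} {first}.", "prompt": story},   # "negative" (assumed class order)
        {"system": f"{stem} {second}.", "prompt": story},  # "positive" (assumed class order)
    )
```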
### 5. Create a pair of "differenced datasets" by subtracting the corresponding `"baseline"` class's sample from both of the other 2 classes' samples:
- The reason for this is so that we "centre" the data around the "baseline" (i.e., set the "baseline" as the origin and look for vector directions that point away from it).
- This is in contrast to assuming the difference of the means is the "centre", as in a 2-class version of this using PCA on the [covariance matrix](https://en.wikipedia.org/wiki/Covariance_matrix) of the differences (i.e., the "standard" method of creating control vectors).
### 6. Now we take our two "differenced datasets", held in data matrices A and B (with rows as samples and columns as features; a NumPy sketch of these steps follows step 7 below):
1. Create the [cross-covariance matrix](https://en.wikipedia.org/wiki/Cross-covariance_matrix), `C = A^T * B`.
2. Next we [symmetrise](https://en.wikipedia.org/wiki/Symmetric_matrix) it, `C' = (C^T + C) / 2`.
3. Perform an [eigendecomposition](https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix), `C' = Q * Λ * Q^(-1)`.
4. Since we symmetrised the matrix, the **eigenvectors** (`Q`) and **eigenvalues** (`Λ`) will all be real-valued.
5. Arrange the **eigenvectors** in descending order based on their corresponding **eigenvalues**.
6. Once the **eigenvectors** are sorted, discard the **eigenvalues** as they won't be needed again.
The reason for using the [cross-covariance matrix](https://en.wikipedia.org/wiki/Cross-covariance_matrix) instead of the [covariance matrix](https://en.wikipedia.org/wiki/Covariance_matrix):
- The **covariance matrix** of a differenced dataset exemplifies directions in **A or B** (ie: think about the expansion of `(a-b)² = a² + b² - 2×a×b`).
- The **cross-covariance matrix** of a differenced dataset exemplifies directions in **A and B** (ie: akin to `a×b`, with no `a²` or `b²` terms).
The reason for creating the symmetrised matrix is two-fold:
- To avoid complex-valued **eigenvectors** that tell us about rotations (which we can't actually make use of here anyway).
- To specifically try to find opposing/balanced "axes" for our different traits (i.e., we don't want to find positively correlated directions nor unbalanced directions).
### 7. So now we have a set of "directions" to examine:
- It turns out that 90% of the time the **principal eigenvector** (i.e., the **eigenvector** with the largest corresponding **eigenvalue**) is the one you want.
- In the ~10% of cases where it is not the **principal eigenvector**, or is split between a couple of different **eigenvectors**, we (greedily) create a "compound direction" by examining the [discriminant ratio](https://en.wikipedia.org/wiki/Linear_discriminant_analysis) of each direction.
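A minimal NumPy sketch of steps 6 and 7, together with the projection-based reorientation and scaling described in step 8 below (function and variable names are illustrative; `A` and `B` are the two differenced data matrices for a single layer):

```python
import numpy as np

def sorted_directions(A, B):
    """Steps 6.1-6.6: eigenvectors of the symmetrised cross-covariance matrix,
    returned as columns sorted in descending order of their (real) eigenvalues."""
    C = A.T @ B                                 # cross-covariance matrix
    C_sym = (C.T + C) / 2                       # symmetrise
    eigenvalues, Q = np.linalg.eigh(C_sym)      # real-valued since C_sym is symmetric
    return Q[:, np.argsort(eigenvalues)[::-1]]  # sort columns; eigenvalues discarded

def orient_and_scale(direction, desired_diffs):
    """Step 8 below: project the (differenced) "desired" dataset onto the
    unit-norm direction; the sign of the mean fixes the orientation and
    its magnitude gives the per-layer scale."""
    d = direction / np.linalg.norm(direction)
    mean = (desired_diffs @ d).mean()
    return d * mean  # multiplying by the mean both flips the sign and scales

# e.g. take the principal eigenvector (step 7's usual choice):
# v = orient_and_scale(sorted_directions(A, B)[:, 0], A)
```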
### 8. Finally, we project the "direction" to reorient and scale as necessary:
- There is no reason the **eigenvectors** should point in the direction we want, so 50% of the time we have to flip all the signs by [projecting](https://en.wikipedia.org/wiki/Projection_%28linear_algebra%29) our (differenced) "desired" dataset onto the (unit norm) direction and then testing the sign of the mean.
- Due to the way LLMs work via the "residual stream", the hidden states tend to get larger and larger as the layers progress, so to normalize this we also scale by the magnitude of the mean of the same projection as above.
- To better separate the "bias" effect from the positive/negative axis (and to make the positive and negative ends equidistant from the model's "baseline" behaviour), we store the midpoint of these means in the de-bias control vector and then subtract this midpoint from both the positive and negative control vectors.
**NOTES**:
- I have found the above can be applied to every layer, but often the last layer will have hidden-state means that are 10-100x larger than the rest, so I have excluded these from everything I have uploaded here.
- I have tried many other eigendecompositions: PCA on the 2-class differenced datasets, PCA on the joined 2-class/3-class datasets, solving generalized eigensystems similar to CCA, and so on.
- The "balanced" directions / "axes" this method finds are the ***exact opposite*** of those needed for the [Refusal in LLMs is mediated by a single direction](https://www.lesswrong.com/posts/jGuXSZgv6qfdhMCuJ/refusal-in-llms-is-mediated-by-a-single-direction) paper.
---
## Changelog
- *28/08/24 - Added [Qwen2-72B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2-72B-Instruct).*
- *29/08/24 - Added [Qwen1.5-72B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-72B-Chat), [Mistral-7B-Instruct-v0.2](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-7B-Instruct-v0.2), [Mistral-7B-Instruct-v0.3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-7B-Instruct-v0.3), [miqu-1-70b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/miqu-1-70b), [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mixtral-8x7B-Instruct-v0.1) and [Yi-1.5-34B-Chat-16K](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Yi-1.5-34B-Chat-16K).*
- *30/08/24 - Added [Meta-Llama-3-8B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3-8B-Instruct), [Meta-Llama-3-70B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3-70B-Instruct), [Meta-Llama-3.1-8B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3.1-8B-Instruct) and [Meta-Llama-3.1-70B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Meta-Llama-3.1-70B-Instruct).*
- *31/08/24 - Added [aya-23-35B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aya-23-35B), [Gemma-2-9B-It-SPPO-Iter3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Gemma-2-9B-It-SPPO-Iter3) and [Qwen1.5-14B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-14B-Chat).*
- *01/09/24 - Added
[Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mixtral-8x22B-Instruct-v0.1) and [Qwen1.5-110B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen1.5-110B-Chat).* - *02/09/24 - Added [c4ai-command-r-plus-08-2024](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/c4ai-command-r-plus-08-2024).* - *03/09/24 - Added [c4ai-command-r-08-2024](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/c4ai-command-r-08-2024) ([\*\*\*READ THIS FIRST\*\*\*](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/discussions/2)), [Yi-1.5-34B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Yi-1.5-34B-Chat), [gemma-2-27b-it-SimPO-37K](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-27b-it-SimPO-37K), [aya-23-8B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aya-23-8B), [gemma-2-9b-it-SimPO](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-9b-it-SimPO), [Qwen2-7B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2-7B-Instruct) and [Yi-34B-Chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Yi-34B-Chat).* - *04/09/24 - Added [deepseek-llm-67b-chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/deepseek-llm-67b-chat), [internlm2_5-20b-chat](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/internlm2_5-20b-chat), [Athene-70B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Athene-70B), [Llama-3-Instruct-8B-SPPO-Iter3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3-Instruct-8B-SPPO-Iter3), [magnum-v2-32b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v2-32b), [Mistral7B-PairRM-SPPO-Iter3](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral7B-PairRM-SPPO-Iter3) and [Nous-Capybara-34B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Nous-Capybara-34B).* - *05/09/24 - Added [Llama-3-70B-Instruct-Storywriter](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3-70B-Instruct-Storywriter), [35b-beta-long](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/35b-beta-long) and [magnum-v3-34b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v3-34b).* - *06/09/24 - Added [Hermes-3-Llama-3.1-70B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Hermes-3-Llama-3.1-70B), [magnum-v2-72b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v2-72b), [magnum-v1-32b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v1-32b) and [L3.1-70B-Euryale-v2.2](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/L3.1-70B-Euryale-v2.2).* - *08/09/24 - Added [aurelian-v0.5-70b-rope8-32K-fp16](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aurelian-v0.5-70b-rope8-32K-fp16), 
[aurelian-alpha0.1-70b-rope8-32K-fp16](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/aurelian-alpha0.1-70b-rope8-32K-fp16), [L3-70B-Euryale-v2.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/L3-70B-Euryale-v2.1), [Llama-3-Lumimaid-70B-v0.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3-Lumimaid-70B-v0.1), [magnum-72b-v1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-72b-v1) and [turbcat-instruct-72b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/turbcat-instruct-72b).* - *09/09/24 - Added [daybreak-miqu-1-70b-v1.0-hf](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/daybreak-miqu-1-70b-v1.0-hf), [dolphin-2.9.2-qwen2-72b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/dolphin-2.9.2-qwen2-72b) and [Lumimaid-v0.2-70B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Lumimaid-v0.2-70B).* - *11/09/24 - Added [Lumimaid-v0.2-123B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Lumimaid-v0.2-123B).* - *12/09/24 - Added [magnum-v2-123b](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v2-123b).* - *13/09/24 - Added [Eurux-8x22b-nca](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Eurux-8x22b-nca).* - *14/09/24 - Added [Divergence-33B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Divergence-33B), [gemma2-gutenberg-27B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma2-gutenberg-27B), [gemma-2-Ifable-9B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/gemma-2-Ifable-9B), [mistral-nemo-gutenberg-12B](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/mistral-nemo-gutenberg-12B), [mistral-nemo-gutenberg-12B-v2](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/mistral-nemo-gutenberg-12B-v2), [romulus-mistral-nemo-12b-simpo](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/romulus-mistral-nemo-12b-simpo), [Llama-3.1-8B-ArliAI-RPMax-v1.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3.1-8B-ArliAI-RPMax-v1.1), [Mistral-Nemo-12B-ArliAI-RPMax-v1.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-Nemo-12B-ArliAI-RPMax-v1.1) and [Llama-3.1-70B-ArliAI-RPMax-v1.1](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Llama-3.1-70B-ArliAI-RPMax-v1.1).* - *20/09/24 - Added [Qwen2.5-7B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-7B-Instruct), [Qwen2.5-14B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-14B-Instruct), [Qwen2.5-32B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-32B-Instruct), [Qwen2.5-72B-Instruct](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Qwen2.5-72B-Instruct), [magnum-v3-27b-kto](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/magnum-v3-27b-kto) and 
[Mistral-Small-Instruct-2409](https://huggingface.co/jukofyork/creative-writing-control-vectors-v3.0/tree/main/Mistral-Small-Instruct-2409).*
facebook/nllb-200-3.3B
facebook
"2023-02-11T20:19:13Z"
89,072
246
transformers
[ "transformers", "pytorch", "m2m_100", "text2text-generation", "nllb", "translation", "ace", "acm", "acq", "aeb", "af", "ajp", "ak", "als", "am", "apc", "ar", "ars", "ary", "arz", "as", "ast", "awa", "ayr", "azb", "azj", "ba", "bm", "ban", "be", "bem", "bn", "bho", "bjn", "bo", "bs", "bug", "bg", "ca", "ceb", "cs", "cjk", "ckb", "crh", "cy", "da", "de", "dik", "dyu", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fj", "fi", "fon", "fr", "fur", "fuv", "gaz", "gd", "ga", "gl", "gn", "gu", "ht", "ha", "he", "hi", "hne", "hr", "hu", "hy", "ig", "ilo", "id", "is", "it", "jv", "ja", "kab", "kac", "kam", "kn", "ks", "ka", "kk", "kbp", "kea", "khk", "km", "ki", "rw", "ky", "kmb", "kmr", "knc", "kg", "ko", "lo", "lij", "li", "ln", "lt", "lmo", "ltg", "lb", "lua", "lg", "luo", "lus", "lvs", "mag", "mai", "ml", "mar", "min", "mk", "mt", "mni", "mos", "mi", "my", "nl", "nn", "nb", "npi", "nso", "nus", "ny", "oc", "ory", "pag", "pa", "pap", "pbt", "pes", "plt", "pl", "pt", "prs", "quy", "ro", "rn", "ru", "sg", "sa", "sat", "scn", "shn", "si", "sk", "sl", "sm", "sn", "sd", "so", "st", "es", "sc", "sr", "ss", "su", "sv", "swh", "szl", "ta", "taq", "tt", "te", "tg", "tl", "th", "ti", "tpi", "tn", "ts", "tk", "tum", "tr", "tw", "tzm", "ug", "uk", "umb", "ur", "uzn", "vec", "vi", "war", "wo", "xh", "ydd", "yo", "yue", "zh", "zsm", "zu", "dataset:flores-200", "license:cc-by-nc-4.0", "autotrain_compatible", "region:us" ]
translation
"2022-07-08T10:06:00Z"
--- language: - ace - acm - acq - aeb - af - ajp - ak - als - am - apc - ar - ars - ary - arz - as - ast - awa - ayr - azb - azj - ba - bm - ban - be - bem - bn - bho - bjn - bo - bs - bug - bg - ca - ceb - cs - cjk - ckb - crh - cy - da - de - dik - dyu - dz - el - en - eo - et - eu - ee - fo - fj - fi - fon - fr - fur - fuv - gaz - gd - ga - gl - gn - gu - ht - ha - he - hi - hne - hr - hu - hy - ig - ilo - id - is - it - jv - ja - kab - kac - kam - kn - ks - ka - kk - kbp - kea - khk - km - ki - rw - ky - kmb - kmr - knc - kg - ko - lo - lij - li - ln - lt - lmo - ltg - lb - lua - lg - luo - lus - lvs - mag - mai - ml - mar - min - mk - mt - mni - mos - mi - my - nl - nn - nb - npi - nso - nus - ny - oc - ory - pag - pa - pap - pbt - pes - plt - pl - pt - prs - quy - ro - rn - ru - sg - sa - sat - scn - shn - si - sk - sl - sm - sn - sd - so - st - es - sc - sr - ss - su - sv - swh - szl - ta - taq - tt - te - tg - tl - th - ti - tpi - tn - ts - tk - tum - tr - tw - tzm - ug - uk - umb - ur - uzn - vec - vi - war - wo - xh - ydd - yo - yue - zh - zsm - zu language_details: "ace_Arab, ace_Latn, acm_Arab, acq_Arab, aeb_Arab, afr_Latn, ajp_Arab, aka_Latn, amh_Ethi, apc_Arab, arb_Arab, ars_Arab, ary_Arab, arz_Arab, asm_Beng, ast_Latn, awa_Deva, ayr_Latn, azb_Arab, azj_Latn, bak_Cyrl, bam_Latn, ban_Latn,bel_Cyrl, bem_Latn, ben_Beng, bho_Deva, bjn_Arab, bjn_Latn, bod_Tibt, bos_Latn, bug_Latn, bul_Cyrl, cat_Latn, ceb_Latn, ces_Latn, cjk_Latn, ckb_Arab, crh_Latn, cym_Latn, dan_Latn, deu_Latn, dik_Latn, dyu_Latn, dzo_Tibt, ell_Grek, eng_Latn, epo_Latn, est_Latn, eus_Latn, ewe_Latn, fao_Latn, pes_Arab, fij_Latn, fin_Latn, fon_Latn, fra_Latn, fur_Latn, fuv_Latn, gla_Latn, gle_Latn, glg_Latn, grn_Latn, guj_Gujr, hat_Latn, hau_Latn, heb_Hebr, hin_Deva, hne_Deva, hrv_Latn, hun_Latn, hye_Armn, ibo_Latn, ilo_Latn, ind_Latn, isl_Latn, ita_Latn, jav_Latn, jpn_Jpan, kab_Latn, kac_Latn, kam_Latn, kan_Knda, kas_Arab, kas_Deva, kat_Geor, knc_Arab, knc_Latn, kaz_Cyrl, kbp_Latn, kea_Latn, khm_Khmr, kik_Latn, kin_Latn, kir_Cyrl, kmb_Latn, kon_Latn, kor_Hang, kmr_Latn, lao_Laoo, lvs_Latn, lij_Latn, lim_Latn, lin_Latn, lit_Latn, lmo_Latn, ltg_Latn, ltz_Latn, lua_Latn, lug_Latn, luo_Latn, lus_Latn, mag_Deva, mai_Deva, mal_Mlym, mar_Deva, min_Latn, mkd_Cyrl, plt_Latn, mlt_Latn, mni_Beng, khk_Cyrl, mos_Latn, mri_Latn, zsm_Latn, mya_Mymr, nld_Latn, nno_Latn, nob_Latn, npi_Deva, nso_Latn, nus_Latn, nya_Latn, oci_Latn, gaz_Latn, ory_Orya, pag_Latn, pan_Guru, pap_Latn, pol_Latn, por_Latn, prs_Arab, pbt_Arab, quy_Latn, ron_Latn, run_Latn, rus_Cyrl, sag_Latn, san_Deva, sat_Beng, scn_Latn, shn_Mymr, sin_Sinh, slk_Latn, slv_Latn, smo_Latn, sna_Latn, snd_Arab, som_Latn, sot_Latn, spa_Latn, als_Latn, srd_Latn, srp_Cyrl, ssw_Latn, sun_Latn, swe_Latn, swh_Latn, szl_Latn, tam_Taml, tat_Cyrl, tel_Telu, tgk_Cyrl, tgl_Latn, tha_Thai, tir_Ethi, taq_Latn, taq_Tfng, tpi_Latn, tsn_Latn, tso_Latn, tuk_Latn, tum_Latn, tur_Latn, twi_Latn, tzm_Tfng, uig_Arab, ukr_Cyrl, umb_Latn, urd_Arab, uzn_Latn, vec_Latn, vie_Latn, war_Latn, wol_Latn, xho_Latn, ydd_Hebr, yor_Latn, yue_Hant, zho_Hans, zho_Hant, zul_Latn" tags: - nllb - translation license: "cc-by-nc-4.0" datasets: - flores-200 metrics: - bleu - spbleu - chrf++ inference: false --- # NLLB-200 This is the model card of NLLB-200's 3.3B variant. Here are the [metrics](https://tinyurl.com/nllb200dense3bmetrics) for that particular checkpoint. - Information about training algorithms, parameters, fairness constraints or other applied approaches, and features. 
The exact training algorithm, data, and the strategies to handle data imbalances for high- and low-resource languages that were used to train NLLB-200 are described in the paper.
- Paper or other resource for more information: NLLB Team et al., No Language Left Behind: Scaling Human-Centered Machine Translation, arXiv, 2022
- License: CC-BY-NC
- Where to send questions or comments about the model: https://github.com/facebookresearch/fairseq/issues
## Intended Use
- Primary intended uses: NLLB-200 is a machine translation model primarily intended for research in machine translation, especially for low-resource languages. It allows for single-sentence translation among 200 languages. Information on how to use the model can be found in the Fairseq code repository along with the training code and references to evaluation and training data.
- Primary intended users: Primary users are researchers and the machine translation research community.
- Out-of-scope use cases: NLLB-200 is a research model and is not released for production deployment. NLLB-200 is trained on general-domain text data and is not intended to be used with domain-specific texts, such as those in the medical or legal domains. The model is not intended to be used for document translation. The model was trained with input lengths not exceeding 512 tokens; therefore, translating longer sequences might result in quality degradation. NLLB-200 translations cannot be used as certified translations.
## Metrics
- Model performance measures: The NLLB-200 model was evaluated using the BLEU, spBLEU, and chrF++ metrics widely adopted by the machine translation community. Additionally, we performed human evaluation with the XSTS protocol and measured the toxicity of the generated translations.
## Evaluation Data
- Datasets: The Flores-200 dataset, described in Section 4 of the paper
- Motivation: We used Flores-200 as it provides full evaluation coverage of the languages in NLLB-200
- Preprocessing: Sentence-split raw text data was preprocessed using SentencePiece. The SentencePiece model is released along with NLLB-200.
## Training Data
- We used parallel multilingual data from a variety of sources to train the model. We provide a detailed report on the data selection and construction process in Section 5 of the paper. We also used monolingual data constructed from Common Crawl. We provide more details in Section 5.2.
## Ethical Considerations
- In this work, we took a reflexive approach in technological development to ensure that we prioritize human users and minimize risks that could be transferred to them. While we reflect on our ethical considerations throughout the article, here are some additional points to highlight. For one, many languages chosen for this study are low-resource languages, with a heavy emphasis on African languages. While quality translation could improve education and information access in many of these communities, such access could also make groups with lower levels of digital literacy more vulnerable to misinformation or online scams. The latter scenarios could arise if bad actors misappropriate our work for nefarious activities, which we conceive of as an example of unintended use. Regarding data acquisition, the training data used for model development were mined from various publicly available sources on the web. Although we invested heavily in data cleaning, personally identifiable information may not be entirely eliminated. Finally, although we did our best to optimize for translation quality, mistranslations produced by the model could remain.
Although the odds are low, this could have an adverse impact on those who rely on these translations to make important decisions (particularly when related to health and safety).
## Caveats and Recommendations
- Our model has been tested on the Wikimedia domain with limited investigation of other domains supported in NLLB-MD. In addition, the supported languages may have variations that our model is not capturing. Users should make appropriate assessments.
## Carbon Footprint Details
- The carbon dioxide (CO2e) estimate is reported in Section 8.8 of the paper.
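For reference, a minimal translation sketch using the 🤗 Transformers integration (the target-language selection via `forced_bos_token_id` follows the standard NLLB recipe, but treat the exact token-conversion call as an assumption to verify against the current Transformers documentation):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-3.3B", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-3.3B")

inputs = tokenizer("NLLB supports single-sentence translation.", return_tensors="pt")
generated = model.generate(
    **inputs,
    # the target language is selected by forcing its code as the first generated token
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
    max_length=100,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```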
monster-labs/control_v1p_sd15_qrcode_monster
monster-labs
"2023-07-21T11:35:31Z"
89,018
1,340
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "controlnet", "qrcode", "en", "license:openrail++", "region:us" ]
null
"2023-06-24T15:07:20Z"
--- tags: - stable-diffusion - controlnet - qrcode license: openrail++ language: - en ---
# Controlnet QR Code Monster v2 For SD-1.5
![QR code in shape of a blue monster, reading "https://qrcode.monster"](images/monster.png)
## Model Description
This model is made to generate creative QR codes that still scan. Keep in mind that not all generated codes might be readable, but you can try different parameters and prompts to get the desired results.
**NEW VERSION** Introducing the upgraded version of our model - Controlnet QR Code Monster v2. V2 is a huge upgrade over v1, for scannability AND creativity. QR codes can now seamlessly blend into the image by using a gray-colored background (#808080). As with the former version, the readability of some generated codes may vary; however, playing around with parameters and prompts can yield better results. You can find it in the `v2/` subfolder.
## How to Use
- **Condition**: QR codes are passed as condition images with a module size of 16px. Use a higher error correction level to make it easier to read (sometimes a lower level can be easier to read if smaller in size). Use a gray background for the rest of the image to make the code integrate better.
- **Prompts**: Use a prompt to guide the QR code generation. The output will highly depend on the given prompt. Some prompts are accepted really easily by the QR code process, while others will require careful tweaking to get good results.
- **Controlnet guidance scale**: Set the controlnet guidance scale value: - High values: The generated QR code will be more readable. - Low values: The generated QR code will be more creative.
### Tips
- For an optimally readable output, try generating multiple QR codes with similar parameters, then choose the best ones.
- Use the Image-to-Image feature to improve the readability of a generated QR code: - Decrease the denoising strength to retain more of the original image. - Increase the controlnet guidance scale value for better readability.
A typical workflow for "saving" a code would be: max out the guidance scale and minimize the denoising strength, then bump the strength until the code scans.
## Example Outputs
Here are some examples of creative, yet scannable QR codes produced by our model:
![City ruins with a building facade in shape of a QR code, reading "https://qrcode.monster"](images/architecture.png) ![QR code in shape of a tree, reading "https://qrcode.monster"](images/tree.png) ![A gothic sculpture in shape of a QR code, reading "https://qrcode.monster"](images/skulls.png)
Feel free to experiment with prompts, parameters, and the Image-to-Image feature to achieve the desired QR code output. Good luck and have fun!
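For 🤗 Diffusers users, a minimal sketch of the workflow above (the `subfolder="v2"` loading path, the base model choice, the input file names, and all parameter values are illustrative starting points rather than tested settings):

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "monster-labs/control_v1p_sd15_qrcode_monster",
    subfolder="v2",  # assumption: the v2 weights live in the `v2/` subfolder
    torch_dtype=torch.float16,
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # any SD-1.5 base model should work
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# your QR code (module size ~16px) composited onto a gray (#808080) background
condition = load_image("qr_condition.png")

image = pipe(
    "a blue monster made of moss and vines, highly detailed",
    image=condition,
    num_inference_steps=30,
    controlnet_conditioning_scale=1.3,  # higher = more readable, lower = more creative
).images[0]
image.save("qr_monster_out.png")
```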
openai/whisper-small.en
openai
"2024-01-22T17:55:26Z"
88,655
33
transformers
[ "transformers", "pytorch", "tf", "jax", "safetensors", "whisper", "automatic-speech-recognition", "audio", "hf-asr-leaderboard", "en", "arxiv:2212.04356", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2022-09-26T06:59:49Z"
--- language: - en tags: - audio - automatic-speech-recognition - hf-asr-leaderboard widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac model-index: - name: whisper-small.en results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (clean) type: librispeech_asr config: clean split: test args: language: en metrics: - name: Test WER type: wer value: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (other) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: pipeline_tag: automatic-speech-recognition license: apache-2.0 --- # Whisper Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains **without** the need for fine-tuning. Whisper was proposed in the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://arxiv.org/abs/2212.04356) by Alec Radford et al. from OpenAI. The original code repository can be found [here](https://github.com/openai/whisper). **Disclaimer**: Content for this model card has partly been written by the Hugging Face team, and parts of it were copied and pasted from the original model card. ## Model details Whisper is a Transformer-based encoder-decoder model, also referred to as a _sequence-to-sequence_ model. It was trained on 680k hours of labelled speech data annotated using large-scale weak supervision. The models were trained on either English-only data or multilingual data. The English-only models were trained on the task of speech recognition. The multilingual models were trained on both speech recognition and speech translation. For speech recognition, the model predicts transcriptions in the *same* language as the audio. For speech translation, the model predicts transcriptions in a *different* language from the audio. Whisper checkpoints come in five configurations of varying model sizes. The smallest four are trained on either English-only or multilingual data. The largest checkpoints are multilingual only. All ten of the pre-trained checkpoints are available on the [Hugging Face Hub](https://huggingface.co/models?search=openai/whisper). The checkpoints are summarised in the following table with links to the models on the Hub: | Size | Parameters | English-only | Multilingual | |----------|------------|------------------------------------------------------|-----------------------------------------------------| | tiny | 39 M | [✓](https://huggingface.co/openai/whisper-tiny.en) | [✓](https://huggingface.co/openai/whisper-tiny) | | base | 74 M | [✓](https://huggingface.co/openai/whisper-base.en) | [✓](https://huggingface.co/openai/whisper-base) | | small | 244 M | [✓](https://huggingface.co/openai/whisper-small.en) | [✓](https://huggingface.co/openai/whisper-small) | | medium | 769 M | [✓](https://huggingface.co/openai/whisper-medium.en) | [✓](https://huggingface.co/openai/whisper-medium) | | large | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large) | | large-v2 | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large-v2) | # Usage This checkpoint is an *English-only* model, meaning it can be used for English speech recognition.
Multilingual speech recognition or speech translation is possible through use of a multilingual checkpoint. To transcribe audio samples, the model has to be used alongside a [`WhisperProcessor`](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperProcessor). The `WhisperProcessor` is used to: 1. Pre-process the audio inputs (converting them to log-Mel spectrograms for the model) 2. Post-process the model outputs (converting them from tokens to text) ## Transcription ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-small.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small.en") >>> # load dummy dataset and read audio files >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=False) ['<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'] ``` The context tokens can be removed from the start of the transcription by setting `skip_special_tokens=True`. ## Evaluation This code snippet shows how to evaluate Whisper small.en on [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr): ```python >>> from datasets import load_dataset >>> from transformers import WhisperForConditionalGeneration, WhisperProcessor >>> import torch >>> from evaluate import load >>> librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") >>> processor = WhisperProcessor.from_pretrained("openai/whisper-small.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small.en").to("cuda") >>> def map_to_pred(batch): >>> audio = batch["audio"] >>> input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features >>> batch["reference"] = processor.tokenizer._normalize(batch['text']) >>> >>> with torch.no_grad(): >>> predicted_ids = model.generate(input_features.to("cuda"))[0] >>> transcription = processor.decode(predicted_ids) >>> batch["prediction"] = processor.tokenizer._normalize(transcription) >>> return batch >>> result = librispeech_test_clean.map(map_to_pred) >>> wer = load("wer") >>> print(100 * wer.compute(references=result["reference"], predictions=result["prediction"])) 3.053161596922323 ``` ## Long-Form Transcription The Whisper model is intrinsically designed to work on audio samples of up to 30s in duration. However, by using a chunking algorithm, it can be used to transcribe audio samples of arbitrary length. This is possible through the Transformers [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) method. Chunking is enabled by setting `chunk_length_s=30` when instantiating the pipeline. With chunking enabled, the pipeline can be run with batched inference.
It can also be extended to predict sequence-level timestamps by passing `return_timestamps=True`: ```python >>> import torch >>> from transformers import pipeline >>> from datasets import load_dataset >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> pipe = pipeline( >>> "automatic-speech-recognition", >>> model="openai/whisper-small.en", >>> chunk_length_s=30, >>> device=device, >>> ) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> prediction = pipe(sample.copy(), batch_size=8)["text"] " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." >>> # we can also return timestamps for the predictions >>> prediction = pipe(sample.copy(), batch_size=8, return_timestamps=True)["chunks"] [{'text': ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.', 'timestamp': (0.0, 5.44)}] ``` Refer to the blog post [ASR Chunking](https://huggingface.co/blog/asr-chunking) for more details on the chunking algorithm. ## Fine-Tuning The pre-trained Whisper model demonstrates a strong ability to generalise to different datasets and domains. However, its predictive capabilities can be improved further for certain languages and tasks through *fine-tuning*. The blog post [Fine-Tune Whisper with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) provides a step-by-step guide to fine-tuning the Whisper model with as little as 5 hours of labelled data. ### Evaluated Use The primary intended users of these models are AI researchers studying robustness, generalization, capabilities, biases, and constraints of the current model. However, Whisper is also potentially quite useful as an ASR solution for developers, especially for English speech recognition. We recognize that once models are released, it is impossible to restrict access to only “intended” uses or to draw reasonable guidelines around what is or is not research. The models are primarily trained and evaluated on ASR and speech translation to English tasks. They show strong ASR results in ~10 languages. They may exhibit additional capabilities, particularly if fine-tuned on certain tasks like voice activity detection, speaker classification, or speaker diarization but have not been robustly evaluated in these areas. We strongly recommend that users perform robust evaluations of the models in a particular context and domain before deploying them. In particular, we caution against using Whisper models to transcribe recordings of individuals taken without their consent or purporting to use these models for any kind of subjective classification. We recommend against use in high-risk domains like decision-making contexts, where flaws in accuracy can lead to pronounced flaws in outcomes. The models are intended to transcribe and translate speech; use of the model for classification is not only unevaluated but also inappropriate, particularly for inferring human attributes. ## Training Data The models are trained on 680,000 hours of audio and the corresponding transcripts collected from the internet. 65% of this data (or 438,000 hours) represents English-language audio and matched English transcripts, roughly 18% (or 126,000 hours) represents non-English audio and English transcripts, while the final 17% (or 117,000 hours) represents non-English audio and the corresponding transcript. This non-English data represents 98 different languages.
As discussed in [the accompanying paper](https://cdn.openai.com/papers/whisper.pdf), we see that performance on transcription in a given language is directly correlated with the amount of training data we employ in that language. ## Performance and Limitations Our studies show that, over many existing ASR systems, the models exhibit improved robustness to accents, background noise, technical language, as well as zero-shot translation from multiple languages into English; and that accuracy on speech recognition and translation is near the state-of-the-art level. However, because the models are trained in a weakly supervised manner using large-scale noisy data, the predictions may include texts that are not actually spoken in the audio input (i.e. hallucination). We hypothesize that this happens because, given their general knowledge of language, the models combine trying to predict the next word in audio with trying to transcribe the audio itself. Our models perform unevenly across languages, and we observe lower accuracy on low-resource and/or low-discoverability languages or languages where we have less training data. The models also exhibit disparate performance on different accents and dialects of particular languages, which may include higher word error rate across speakers of different genders, races, ages, or other demographic criteria. Our full evaluation results are presented in [the paper accompanying this release](https://cdn.openai.com/papers/whisper.pdf). In addition, the sequence-to-sequence architecture of the model makes it prone to generating repetitive texts, which can be mitigated to some degree by beam search and temperature scheduling but not perfectly. Further analysis of these limitations is provided in [the paper](https://cdn.openai.com/papers/whisper.pdf). It is likely that this behavior and hallucinations may be worse on lower-resource and/or lower-discoverability languages. ## Broader Implications We anticipate that Whisper models’ transcription capabilities may be used for improving accessibility tools. While Whisper models cannot be used for real-time transcription out of the box, their speed and size suggest that others may be able to build applications on top of them that allow for near-real-time speech recognition and translation. The real value of beneficial applications built on top of Whisper models suggests that the disparate performance of these models may have real economic implications. There are also potential dual use concerns that come with releasing Whisper. While we hope the technology will be used primarily for beneficial purposes, making ASR technology more accessible could enable more actors to build capable surveillance technologies or scale up existing surveillance efforts, as the speed and accuracy allow for affordable automatic transcription and translation of large volumes of audio communication. Moreover, these models may have some capabilities to recognize specific individuals out of the box, which in turn presents safety concerns related both to dual use and disparate performance. In practice, we expect that the cost of transcription is not the limiting factor of scaling up surveillance projects.
### BibTeX entry and citation info ```bibtex @misc{radford2022whisper, doi = {10.48550/ARXIV.2212.04356}, url = {https://arxiv.org/abs/2212.04356}, author = {Radford, Alec and Kim, Jong Wook and Xu, Tao and Brockman, Greg and McLeavey, Christine and Sutskever, Ilya}, title = {Robust Speech Recognition via Large-Scale Weak Supervision}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
google-t5/t5-11b
google-t5
"2023-01-02T16:15:50Z"
88,642
59
transformers
[ "transformers", "pytorch", "tf", "t5", "text2text-generation", "summarization", "translation", "en", "fr", "ro", "de", "multilingual", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
translation
"2022-03-02T23:29:04Z"
--- language: - en - fr - ro - de - multilingual license: apache-2.0 tags: - summarization - translation datasets: - c4 inference: false --- # Model Card for T5 11B ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876727255704b657944387750412e706e67) # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Training Details](#training-details) 5. [Evaluation](#evaluation) 6. [Environmental Impact](#environmental-impact) 7. [Citation](#citation) 8. [Model Card Authors](#model-card-authors) 9. [How To Get Started With the Model](#how-to-get-started-with-the-model) # Model Details ## Model Description The developers of the Text-To-Text Transfer Transformer (T5) [write](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html): > With T5, we propose reframing all NLP tasks into a unified text-to-text-format where the input and output are always text strings, in contrast to BERT-style models that can only output either a class label or a span of the input. Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task. T5-11B is the checkpoint with 11 billion parameters. - **Developed by:** Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. See [associated paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) and [GitHub repo](https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints) - **Model type:** Language model - **Language(s) (NLP):** English, French, Romanian, German - **License:** Apache 2.0 - **Related Models:** [All T5 Checkpoints](https://huggingface.co/models?search=t5) - **Resources for more information:** - [Research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) - [Google's T5 Blog Post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) - [GitHub Repo](https://github.com/google-research/text-to-text-transfer-transformer) - [Hugging Face T5 Docs](https://huggingface.co/docs/transformers/model_doc/t5) # Uses ## Direct Use and Downstream Use The developers write in a [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) that: > Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task, including machine translation, document summarization, question answering, and classification tasks (e.g., sentiment analysis). We can even apply T5 to regression tasks by training it to predict the string representation of a number instead of the number itself. See the [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) and [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. ## Out-of-Scope Use More information needed. # Bias, Risks, and Limitations More information needed. ## Recommendations More information needed. # Training Details ## Training Data The model is pre-trained on the [Colossal Clean Crawled Corpus (C4)](https://www.tensorflow.org/datasets/catalog/c4), which was developed and released in the context of the same [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) as T5. The model was pre-trained on a **multi-task mixture of unsupervised (1.) and supervised tasks (2.)**.
The following datasets were used for (1.) and (2.): 1. **Datasets used for Unsupervised denoising objective**: - [C4](https://huggingface.co/datasets/c4) - [Wiki-DPR](https://huggingface.co/datasets/wiki_dpr) 2. **Datasets used for Supervised text-to-text language modeling objective** - Sentence acceptability judgment - CoLA [Warstadt et al., 2018](https://arxiv.org/abs/1805.12471) - Sentiment analysis - SST-2 [Socher et al., 2013](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) - Paraphrasing/sentence similarity - MRPC [Dolan and Brockett, 2005](https://aclanthology.org/I05-5002) - STS-B [Cer et al., 2017](https://arxiv.org/abs/1708.00055) - QQP [Iyer et al., 2017](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) - Natural language inference - MNLI [Williams et al., 2017](https://arxiv.org/abs/1704.05426) - QNLI [Rajpurkar et al., 2016](https://arxiv.org/abs/1606.05250) - RTE [Dagan et al., 2005](https://link.springer.com/chapter/10.1007/11736790_9) - CB [De Marneffe et al., 2019](https://semanticsarchive.net/Archive/Tg3ZGI2M/Marneffe.pdf) - Sentence completion - COPA [Roemmele et al., 2011](https://www.researchgate.net/publication/221251392_Choice_of_Plausible_Alternatives_An_Evaluation_of_Commonsense_Causal_Reasoning) - Word sense disambiguation - WIC [Pilehvar and Camacho-Collados, 2018](https://arxiv.org/abs/1808.09121) - Question answering - MultiRC [Khashabi et al., 2018](https://aclanthology.org/N18-1023) - ReCoRD [Zhang et al., 2018](https://arxiv.org/abs/1810.12885) - BoolQ [Clark et al., 2019](https://arxiv.org/abs/1905.10044) ## Training Procedure In their [abstract](https://jmlr.org/papers/volume21/20-074/20-074.pdf), the model developers write: > In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. The framework introduced, the T5 framework, involves a training procedure that brings together the approaches studied in the paper. See the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. # Evaluation ## Testing Data, Factors & Metrics The developers evaluated the model on 24 tasks, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for full details. ## Results For full results for T5-11B, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf), Table 14. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Google Cloud TPU Pods - **Hours used:** More information needed - **Cloud Provider:** GCP - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Citation **BibTeX:** ```bibtex @article{2020t5, author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J.
Liu}, title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer}, journal = {Journal of Machine Learning Research}, year = {2020}, volume = {21}, number = {140}, pages = {1-67}, url = {http://jmlr.org/papers/v21/20-074.html} } ``` **APA:** - Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., ... & Liu, P. J. (2020). Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140), 1-67. # Model Card Authors This model card was written by the team at Hugging Face. # How to Get Started with the Model ## Disclaimer **Before `transformers` v3.5.0**, due to its immense size, `t5-11b` required some special treatment. If you're using transformers `<= v3.4.0`, `t5-11b` should be loaded with flag `use_cdn` set to `False` as follows: ```python t5 = transformers.T5ForConditionalGeneration.from_pretrained('t5-11b', use_cdn = False) ``` Secondly, a single GPU will most likely not have enough memory to even load the model into memory as the weights alone amount to over 40 GB. - Model parallelism has to be used here to overcome this problem as is explained in this [PR](https://github.com/huggingface/transformers/pull/3578). - DeepSpeed's ZeRO-Offload is another approach as explained in this [post](https://github.com/huggingface/transformers/issues/9996). See the [Hugging Face T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Model) docs and a [Colab Notebook](https://colab.research.google.com/github/google-research/text-to-text-transfer-transformer/blob/main/notebooks/t5-trivia.ipynb) created by the model developers for more context.
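To illustrate the text-to-text pattern described above, here is a minimal sketch. It uses the much smaller `t5-small` checkpoint so it runs on modest hardware (an assumption for illustration; the same code applies to `t5-11b` given enough memory).

```python
# A minimal sketch of the text-to-text usage pattern; t5-small is used as an
# illustrative stand-in for t5-11b, which needs far more memory.
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")

# tasks are selected with a plain-text prefix, e.g. translation below
input_ids = tokenizer(
    "translate English to German: The house is wonderful.", return_tensors="pt"
).input_ids
outputs = model.generate(input_ids, max_length=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```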
HuggingFaceTB/SmolLM-135M
HuggingFaceTB
"2024-08-01T20:12:34Z"
88,379
162
transformers
[ "transformers", "onnx", "safetensors", "llama", "text-generation", "en", "dataset:HuggingFaceTB/smollm-corpus", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-07-14T21:45:18Z"
--- library_name: transformers license: apache-2.0 language: - en datasets: - HuggingFaceTB/smollm-corpus --- # SmolLM <center> <img src="https://huggingface.co/datasets/HuggingFaceTB/images/resolve/main/banner_smol.png" alt="SmolLM" width="1100" height="600"> </center> ## Table of Contents 1. [Model Summary](#model-summary) 2. [Limitations](#limitations) 3. [Training](#training) 4. [License](#license) 5. [Citation](#citation) ## Model Summary SmolLM is a series of state-of-the-art small language models available in three sizes: 135M, 360M, and 1.7B parameters. These models are built on Cosmo-Corpus, a meticulously curated high-quality training dataset. Cosmo-Corpus includes Cosmopedia v2 (28B tokens of synthetic textbooks and stories generated by Mixtral), Python-Edu (4B tokens of educational Python samples from The Stack), and FineWeb-Edu (220B tokens of deduplicated educational web samples from FineWeb). SmolLM models have shown promising results when compared to other models in their size categories across various benchmarks testing common sense reasoning and world knowledge. For detailed information on training, benchmarks and performance, please refer to our full [blog post](https://huggingface.co/blog/smollm). This is the SmolLM-135M model. ### Generation ```bash pip install transformers ``` #### Running the model on CPU/GPU/multi GPU * _Using full precision_ ```python # pip install transformers from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "HuggingFaceTB/SmolLM-135M" device = "cuda" # for GPU usage or "cpu" for CPU usage tokenizer = AutoTokenizer.from_pretrained(checkpoint) # for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")` model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device) inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to(device) outputs = model.generate(inputs) print(tokenizer.decode(outputs[0])) ``` ```bash >>> print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB") Memory footprint: 12624.81 MB ``` * _Using `torch.bfloat16`_ ```python # pip install accelerate import torch from transformers import AutoTokenizer, AutoModelForCausalLM checkpoint = "HuggingFaceTB/SmolLM-135M" tokenizer = AutoTokenizer.from_pretrained(checkpoint) # for fp16 use `torch_dtype=torch.float16` instead model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16) inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to("cuda") outputs = model.generate(inputs) print(tokenizer.decode(outputs[0])) ``` ```bash >>> print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB") Memory footprint: 269.03 MB ``` #### Quantized Versions through `bitsandbytes` * _Using 8-bit precision (int8)_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig # to use 4bit use `load_in_4bit=True` instead quantization_config = BitsAndBytesConfig(load_in_8bit=True) checkpoint = "HuggingFaceTB/SmolLM-135M" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint, quantization_config=quantization_config) inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to("cuda") outputs = model.generate(inputs) print(tokenizer.decode(outputs[0])) ``` ```bash >>> print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB") # load_in_8bit Memory footprint:
162.87 MB # load_in_4bit >>> print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB") Memory footprint: 109.78 MB ``` # Limitations While SmolLM models have been trained on a diverse dataset including educational content and synthetic texts, they have limitations. The models primarily understand and generate content in English. They can produce text on a variety of topics, but the generated content may not always be factually accurate, logically consistent, or free from biases present in the training data. These models should be used as assistive tools rather than definitive sources of information. Users should always verify important information and critically evaluate any generated content. For a more comprehensive discussion of the models' capabilities and limitations, please refer to our full [blog post](https://huggingface.co/blog/smollm). This repository contains a converted version of our latest trained model. We've noticed a small performance difference between this converted checkpoint (transformers) and the original (nanotron). We're currently working to resolve this issue. # Training ## Model - **Architecture:** For architecture detail, see the [blog post](https://huggingface.co/blog/smollm). - **Pretraining steps:** 600k - **Pretraining tokens:** 600B - **Precision:** bfloat16 - **Tokenizer:** [HuggingFaceTB/cosmo2-tokenizer](https://huggingface.co/HuggingFaceTB/cosmo2-tokenizer) ## Hardware - **GPUs:** 64 H100 ## Software - **Training Framework:** [Nanotron](https://github.com/huggingface/nanotron/tree/main) # License [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) # Citation ```bash @misc{allal2024SmolLM, title={SmolLM - blazingly fast and remarkably powerful}, author={Loubna Ben Allal and Anton Lozhkov and Elie Bakouch and Leandro von Werra and Thomas Wolf}, year={2024}, } ```
mesolitica/sentiment-analysis-nanot5-small-malaysian-cased
mesolitica
"2023-10-08T07:38:48Z"
88,306
0
transformers
[ "transformers", "safetensors", "t5", "text-classification", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-classification
"2023-10-08T07:38:17Z"
Entry not found
unsloth/Llama-3.2-3B-Instruct-bnb-4bit
unsloth
"2024-09-30T09:34:13Z"
88,017
8
transformers
[ "transformers", "safetensors", "llama", "text-generation", "llama-3", "meta", "facebook", "unsloth", "conversational", "en", "base_model:meta-llama/Llama-3.2-3B-Instruct", "base_model:quantized:meta-llama/Llama-3.2-3B-Instruct", "license:llama3.2", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
text-generation
"2024-09-25T18:51:15Z"
--- base_model: meta-llama/Llama-3.2-3B-Instruct language: - en library_name: transformers license: llama3.2 tags: - llama-3 - llama - meta - facebook - unsloth - transformers --- # Finetune Llama 3.2, Gemma 2, Mistral 2-5x faster with 70% less memory via Unsloth! We have a free Google Colab Tesla T4 notebook for Llama 3.2 (3B) here: https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/Discord%20button.png" width="200"/>](https://discord.gg/unsloth) [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth) # unsloth/Llama-3.2-3B-Instruct-bnb-4bit For more details on the model, please go to Meta's original [model card](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) ## ✨ Finetune for Free All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF or vLLM, or uploaded to Hugging Face. | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| | **Llama-3.2 (3B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less | | **Llama-3.2 (11B vision)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less | | **Llama-3.1 (8B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less | | **Phi-3.5 (mini)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2.4x faster | 58% less | | **Mistral (7B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less | | **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | - This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr. - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. ## Special Thanks A huge thank you to the Meta and Llama team for creating and releasing these models. ## Model Information The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.
**Model developer**: Meta **Model Architecture:** Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. **Supported languages:** English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai are officially supported. Llama 3.2 has been trained on a broader collection of languages than these 8 supported languages. Developers may fine-tune Llama 3.2 models for languages beyond these supported languages, provided they comply with the Llama 3.2 Community License and the Acceptable Use Policy. Developers are always expected to ensure that their deployments, including those that involve additional languages, are completed safely and responsibly. **Llama 3.2 family of models** Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability. **Model Release Date:** Sept 25, 2024 **Status:** This is a static model trained on an offline dataset. Future versions may be released that improve model capabilities and safety. **License:** Use of Llama 3.2 is governed by the [Llama 3.2 Community License](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE) (a custom, commercial license agreement). **Where to send questions or comments about the model:** Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3.1 in applications, please go [here](https://github.com/meta-llama/llama-recipes).
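For reference, here is a minimal sketch of loading this pre-quantized 4-bit checkpoint with plain transformers; the chat message and generation settings are illustrative assumptions, and `bitsandbytes` plus `accelerate` must be installed.

```python
# A minimal sketch; loading this pre-quantized bnb-4bit checkpoint requires
# the bitsandbytes and accelerate packages. The prompt is illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "unsloth/Llama-3.2-3B-Instruct-bnb-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "In one sentence, what is Llama 3.2?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=64)
# decode only the newly generated tokens, skipping the prompt
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```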
katuni4ka/tiny-random-glm4
katuni4ka
"2024-09-21T08:34:24Z"
87,977
0
transformers
[ "transformers", "safetensors", "chatglm", "feature-extraction", "custom_code", "region:us" ]
feature-extraction
"2024-06-20T19:27:57Z"
Entry not found
miguelcarv/resnet-152-text-detector
miguelcarv
"2024-01-21T01:43:52Z"
87,906
0
transformers
[ "transformers", "safetensors", "resnet", "image-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2024-01-19T20:01:50Z"
# Model Card for ResNet-152 Text Detector This model was trained with the intent to quickly classify whether or not an image contains legible text. It was trained as a binary classification problem on the COCO-Text dataset together with some images from LLaVAR. This came out to a total of ~140k images, where 50% of them had text and 50% of them had no legible text. # Model Details ## How to Get Started with the Model ```python from PIL import Image import requests import torch from transformers import AutoImageProcessor, AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained( "miguelcarv/resnet-152-text-detector", ) processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50", do_resize=False) url = "http://images.cocodataset.org/train2017/000000044520.jpg" image = Image.open(requests.get(url, stream=True).raw).convert('RGB').resize((300,300)) inputs = processor(image, return_tensors="pt").pixel_values with torch.no_grad(): outputs = model(inputs) logits_per_image = outputs.logits probs = logits_per_image.softmax(dim=1) print(probs) # tensor([[0.1085, 0.8915]]) ``` # Training Details - Trained for three epochs - Resolution: 300x300 - Learning rate: 5e-5 - Optimizer: AdamW - Batch size: 64 - Trained with FP32
SG161222/Realistic_Vision_V6.0_B1_noVAE
SG161222
"2024-10-08T16:36:37Z"
87,851
197
diffusers
[ "diffusers", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2023-11-29T08:02:09Z"
--- license: creativeml-openrail-m --- <strong>Check my exclusive models on Mage: </strong><a href="https://www.mage.space/play/4371756b27bf52e7a1146dc6fe2d969c" rel="noopener noreferrer nofollow"><strong>ParagonXL</strong></a><strong> / </strong><a href="https://www.mage.space/play/df67a9f27f19629a98cb0fb619d1949a" rel="noopener noreferrer nofollow"><strong>NovaXL</strong></a><strong> / </strong><a href="https://www.mage.space/play/d8db06ae964310acb4e090eec03984df" rel="noopener noreferrer nofollow"><strong>NovaXL Lightning</strong></a><strong> / </strong><a href="https://www.mage.space/play/541da1e10976ab82976a5cacc770a413" rel="noopener noreferrer nofollow"><strong>NovaXL V2</strong></a><strong> / </strong><a href="https://www.mage.space/play/a56d2680c464ef25b8c66df126b3f706" rel="noopener noreferrer nofollow"><strong>NovaXL Pony</strong></a><strong> / </strong><a href="https://www.mage.space/play/b0ab6733c3be2408c93523d57a605371" rel="noopener noreferrer nofollow"><strong>NovaXL Pony Lightning</strong></a><strong> / </strong><a href="https://www.mage.space/play/e3b01cd493ed86ed8e4708751b1c9165" rel="noopener noreferrer nofollow"><strong>RealDreamXL</strong></a><strong> / </strong><a href="https://www.mage.space/play/ef062fc389c3f8723002428290c1158c" rel="noopener noreferrer nofollow"><strong>RealDreamXL Lightning</strong></a> <b>This model is available on <a href="https://www.mage.space/">Mage.Space</a> (main sponsor)</b><br> <b>You can support me directly on Boosty - https://boosty.to/sg_161222</b><br> <b>Please read this!</b><br> This is not yet the full version of the model (read the <b>"Model Description"</b> section).<br> For version 6.0 it is recommended to use with VAE (to improve generation quality and get rid of artifacts): https://huggingface.co/stabilityai/sd-vae-ft-mse-original<br> <b>Model Description</b><br> Realistic Vision V6.0 "New Vision" is a global update for the Realistic Vision model, which will be released gradually in several beta versions until the full release. The model is aimed at realism and photorealism.<br> CivitAI Page: https://civitai.com/models/4201/realistic-vision-v60-b1?modelVersionId=245598 <b>Resolutions (use a lower resolution if you get a lot of mutations and similar artifacts)</b><br> - Face Portrait: 896x896<br> - Portrait: 896x896, 768x1024<br> - Half Body: 768x1024, 640x1152<br> - Full Body: 896x896, 768x1024, 640x1152, 1024x768, 1152x640<br> <b>Improvements</b> - increased generation resolution to resolutions such as: 896x896, 768x1024, 640x1152, 1024x768, 1152x640 (note: in some cases there may still be mutations, duplications, etc. -> will be fixed in future versions).<br> - improved SFW and NSFW for female and female anatomy (note: not all poses work correctly in such large resolutions -> will be fixed in future versions).<br> <b>Recommended Workflow</b><br> Images can be generated with or without Hires.Fix, but it will help improve the generation quality significantly.
In some cases it is strictly recommended to use Hires.Fix, namely when generating full body and half body images (note: you can also use Restore Faces or ADetailer).<br> <b>Recommended Generation Parameters</b><br> Sampler: DPM++ SDE Karras (25+ steps) / DPM++ 2M SDE (50+ steps)<br> Negative Prompt: (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck<br> <b>Recommended Hires.Fix Parameters</b><br> Sampler: DPM++ SDE Karras or DPM++ 2M SDE<br> Denoising steps: 10+ (DPM++ SDE Karras) / 20+ (DPM++ 2M SDE (note: the lower the value of Hires steps for a given sampler, the stronger the skin texture and the higher the chance of getting artifacts))<br> Denoising strength: 0.1-0.3<br> Upscaler: 4x-UltraSharp / 4x_NMKD-Superscale-SP_178000_G or another<br> Upscale by: 1.1-2.0+<br>
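For diffusers users, here is a minimal sketch that follows the recommendations above (the external sd-vae-ft-mse VAE, a DPM++ 2M SDE Karras-style scheduler, and one of the recommended resolutions). The prompt, the shortened negative prompt, and the exact scheduler mapping are illustrative assumptions.

```python
# A minimal sketch; the scheduler settings approximate "DPM++ 2M SDE Karras",
# and the prompts are illustrative assumptions.
import torch
from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, StableDiffusionPipeline

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V6.0_B1_noVAE", vae=vae, torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, algorithm_type="sde-dpmsolver++", use_karras_sigmas=True
)

image = pipe(
    "analog film photo, portrait of a woman, natural light",
    # a shortened subset of the negative prompt recommended above
    negative_prompt="deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime",
    width=896, height=896,  # recommended face-portrait resolution
    num_inference_steps=50,
).images[0]
image.save("portrait.png")
```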
Vikhrmodels/Vikhr-7B-instruct_0.4
Vikhrmodels
"2024-05-24T10:10:50Z"
87,844
29
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "ru", "en", "arxiv:2405.13929", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-04-10T11:54:34Z"
--- library_name: transformers language: - ru - en --- # Vikhr 0.3-0.4 release We added substantially more data to SFT, so JSON and multiturn now work more stably on long context and hard prompts, and we slightly tuned the model's pretraining parameters. - [Google Colab](https://colab.research.google.com/drive/15O9LwZhVUa1LWhZa2UKr_B-KOKenJBvv#scrollTo=5EeNFU2-9ERi) - [GGUF](https://huggingface.co/Vikhrmodels/Vikhr-7B-instruct_0.4-GGUF) ```python from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline import torch model = AutoModelForCausalLM.from_pretrained("Vikhrmodels/Vikhr-7B-instruct_0.4", device_map="auto", attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16) tokenizer = AutoTokenizer.from_pretrained("Vikhrmodels/Vikhr-7B-instruct_0.4") pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) prompts = [ "В чем разница между фруктом и овощем?", "Годы жизни колмагорова?"] def test_inference(prompt): prompt = pipe.tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=False, add_generation_prompt=True) print(prompt) outputs = pipe(prompt, max_new_tokens=512, do_sample=True, num_beams=1, temperature=0.25, top_k=50, top_p=0.98, eos_token_id=79097) return outputs[0]['generated_text'][len(prompt):].strip() for prompt in prompts: print(f" prompt:\n{prompt}") print(f" response:\n{test_inference(prompt)}") print("-"*50) ``` ``` @article{nikolich2024vikhr, title={Vikhr: The Family of Open-Source Instruction-Tuned Large Language Models for Russian}, author={Aleksandr Nikolich and Konstantin Korolev and Artem Shelmanov}, journal={arXiv preprint arXiv:2405.13929}, year={2024}, url={https://arxiv.org/pdf/2405.13929} } ```
google/mobilenet_v2_1.0_224
google
"2023-10-31T13:40:16Z"
87,840
12
transformers
[ "transformers", "pytorch", "safetensors", "mobilenet_v2", "image-classification", "vision", "dataset:imagenet-1k", "arxiv:1801.04381", "license:other", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2022-11-10T16:04:32Z"
--- license: other tags: - vision - image-classification datasets: - imagenet-1k widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace --- # MobileNet V2 MobileNet V2 model pre-trained on ImageNet-1k at resolution 224x224. It was introduced in [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. It was first released in [this repository](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet). Disclaimer: The team releasing MobileNet V2 did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description From the [original README](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md): > MobileNets are small, low-latency, low-power models parameterized to meet the resource constraints of a variety of use cases. They can be built upon for classification, detection, embeddings and segmentation similar to how other popular large scale models, such as Inception, are used. MobileNets can be run efficiently on mobile devices [...] MobileNets trade off between latency, size and accuracy while comparing favorably with popular models from the literature. The checkpoints are named **mobilenet\_v2\_*depth*\_*size***, for example **mobilenet\_v2\_1.0\_224**, where **1.0** is the depth multiplier and **224** is the resolution of the input images the model was trained on. ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=mobilenet_v2) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 1,000 ImageNet classes: ```python from transformers import AutoImageProcessor, AutoModelForImageClassification from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) preprocessor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") model = AutoModelForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224") inputs = preprocessor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` Note: This model actually predicts 1001 classes, the 1000 classes from ImageNet plus an extra “background” class (index 0). Currently, both the feature extractor and model support PyTorch. ### BibTeX entry and citation info ```bibtex @inproceedings{mobilenetv22018, title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, author={Mark Sandler and Andrew Howard and Menglong Zhu and Andrey Zhmoginov and Liang-Chieh Chen}, booktitle={CVPR}, year={2018} } ```
katuni4ka/tiny-random-falcon-40b
katuni4ka
"2024-07-02T10:36:30Z"
87,730
0
transformers
[ "transformers", "safetensors", "falcon", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-04-24T16:21:21Z"
Entry not found
sentence-transformers/msmarco-bert-base-dot-v5
sentence-transformers
"2024-10-10T15:03:22Z"
87,539
15
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "onnx", "safetensors", "openvino", "bert", "feature-extraction", "sentence-similarity", "transformers", "en", "arxiv:1908.10084", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
--- language: - en library_name: sentence-transformers tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers pipeline_tag: sentence-similarity --- # msmarco-bert-base-dot-v5 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768-dimensional dense vector space and was designed for **semantic search**. It has been trained on 500K (query, answer) pairs from the [MS MARCO dataset](https://github.com/microsoft/MSMARCO-Passage-Ranking/). For an introduction to semantic search, have a look at: [SBERT.net - Semantic Search](https://www.sbert.net/examples/applications/semantic-search/README.html) ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer, util query = "How many people live in London?" docs = ["Around 9 Million people live in London", "London is known for its financial district"] #Load the model model = SentenceTransformer('sentence-transformers/msmarco-bert-base-dot-v5') #Encode query and documents query_emb = model.encode(query) doc_emb = model.encode(docs) #Compute dot score between query and all document embeddings scores = util.dot_score(query_emb, doc_emb)[0].cpu().tolist() #Combine docs & scores doc_score_pairs = list(zip(docs, scores)) #Sort by decreasing score doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True) #Output passages & scores print("Query:", query) for doc, score in doc_score_pairs: print(score, doc) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the correct pooling operation on top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output.last_hidden_state input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) #Encode text def encode(texts): # Tokenize sentences encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input, return_dict=True) # Perform pooling embeddings = mean_pooling(model_output, encoded_input['attention_mask']) return embeddings # Sentences we want sentence embeddings for query = "How many people live in London?"
docs = ["Around 9 Million people live in London", "London is known for its financial district"] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/msmarco-bert-base-dot-v5") model = AutoModel.from_pretrained("sentence-transformers/msmarco-bert-base-dot-v5") #Encode query and docs query_emb = encode(query) doc_emb = encode(docs) #Compute dot score between query and all document embeddings scores = torch.mm(query_emb, doc_emb.transpose(0, 1))[0].cpu().tolist() #Combine docs & scores doc_score_pairs = list(zip(docs, scores)) #Sort by decreasing score doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True) #Output passages & scores print("Query:", query) for doc, score in doc_score_pairs: print(score, doc) ``` ## Technical Details The following are some technical details on how this model should be used: | Setting | Value | | --- | :---: | | Dimensions | 768 | | Max Sequence Length | 512 | | Produces normalized embeddings | No | | Pooling-Method | Mean pooling | | Suitable score functions | dot-product (e.g. `util.dot_score`) | ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=msmarco-bert-base-dot-v5) ## Training See `train_script.py` in this repository for the training script used. The model was trained with the following parameters (a minimal sketch of how they map onto the training API appears at the end of this card): **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "callback": null, "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 1e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: bert-base-uncased (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors This model was trained by [sentence-transformers](https://www.sbert.net/). If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
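As referenced in the Training section above, here is a minimal, hedged sketch of how the listed `fit()` parameters map onto the sentence-transformers training API. The single example triplet and its margin label are illustrative assumptions; `MarginMSELoss` expects (query, positive, negative) texts whose label is a gold margin between cross-encoder scores.

```python
# A minimal sketch of the training setup described above; the single
# InputExample and its margin label are illustrative assumptions.
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, SentenceTransformer, losses

model = SentenceTransformer("bert-base-uncased")  # transformer + mean pooling

# MarginMSELoss trains on (query, positive, negative) triplets whose label is
# the gold margin between the cross-encoder scores of the two passages
train_examples = [
    InputExample(
        texts=[
            "How many people live in London?",
            "Around 9 Million people live in London",
            "London is known for its financial district",
        ],
        label=4.2,  # illustrative margin score
    )
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=64)
train_loss = losses.MarginMSELoss(model)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=30,
    warmup_steps=10000,
    optimizer_params={"lr": 1e-5},
    weight_decay=0.01,
)
```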
unslothai/vram-8
unslothai
"2024-07-07T17:00:53Z"
87,508
0
transformers
[ "transformers", "safetensors", "llama", "feature-extraction", "text-generation-inference", "endpoints_compatible", "region:us" ]
feature-extraction
"2024-07-07T17:00:26Z"
--- library_name: transformers tags: [] ---
ar9av/bart_kw_extractor
ar9av
"2024-09-12T16:32:47Z"
87,229
0
null
[ "safetensors", "bart", "region:us" ]
null
"2024-09-12T16:22:42Z"
Entry not found
Dremmar/nsfw-xl
Dremmar
"2024-01-07T11:19:41Z"
87,067
71
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "template:sd-lora", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "region:us", "not-for-all-audiences" ]
text-to-image
"2024-01-07T11:18:33Z"
--- tags: - text-to-image - stable-diffusion - lora - diffusers - template:sd-lora widget: - text: "analog film photo woman, breasts, heatshot, facing viewer <lora:nsfw-xl-2.0:1> . faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage" output: url: images/00097-3192725504.jpeg base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: null --- # nsfw-xl <Gallery /> ## Model description just a copy of https://civitai.com/models/141300/nsfw-xl ## Download model Weights for this model are available in Safetensors format. [Download](/Dremmar/nsfw-xl/tree/main) them in the Files & versions tab.
facebook/opt-1.3b
facebook
"2023-09-15T13:09:33Z"
86,972
148
transformers
[ "transformers", "pytorch", "tf", "jax", "opt", "text-generation", "en", "arxiv:2205.01068", "arxiv:2005.14165", "license:other", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2022-05-11T08:26:00Z"
--- language: en inference: false tags: - text-generation - opt license: other commercial: false --- # OPT : Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068) > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models like [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modeling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use You can use this model directly with a pipeline for text generation. ```python >>> from transformers import pipeline >>> generator = pipeline('text-generation', model="facebook/opt-1.3b") >>> generator("What are we having for dinner?") [{'generated_text': "What are we having for dinner?\nI'm not sure. I'm not a chef. I'"}] ``` By default, generation is deterministic.
To use top-k sampling, please set `do_sample` to `True`.

```python
>>> from transformers import pipeline, set_seed

>>> set_seed(32)
>>> generator = pipeline('text-generation', model="facebook/opt-1.3b", do_sample=True)
>>> generator("What are we having for dinner?")
[{'generated_text': "What are we having for dinner?\nI have had chicken and rice for lunch. It is delicious"}]
```

### Limitations and bias

As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral, the model is strongly biased:

> Like other large language models for which the diversity (or lack thereof) of training
> data induces downstream impact on the quality of our model, OPT-175B has limitations in terms
> of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and
> hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern
> large language models.

Here's an example of how the model can have biased predictions:

```python
>>> from transformers import pipeline, set_seed

>>> set_seed(32)
>>> generator = pipeline('text-generation', model="facebook/opt-1.3b", do_sample=True, num_return_sequences=5)
>>> generator("The woman worked as a")
[{'generated_text': 'The woman worked as a bartender for six months before getting to the job she always dreamed of. She'},
 {'generated_text': 'The woman worked as a nanny in a house near The White Horse Farm in the Yorkshire Dales'},
 {'generated_text': "The woman worked as a translator at the British Broadcasting Corporation's headquarters and was also an acquaintance of some"},
 {'generated_text': 'The woman worked as a secretary and went to school full-time, and also worked as a waitress'},
 {'generated_text': 'The woman worked as a beautician with her baby and the little girl is now at the age where'}]
```

compared to:

```python
>>> from transformers import pipeline, set_seed

>>> set_seed(32)
>>> generator = pipeline('text-generation', model="facebook/opt-1.3b", do_sample=True, num_return_sequences=5)
>>> generator("The man worked as a")
[{'generated_text': 'The man worked as a janitor and the owner of the house he worked at caught him cheating on'},
 {'generated_text': 'The man worked as a software engineer.\n\nFor over 10 years, he had been at Amazon'},
 {'generated_text': 'The man worked as a car salesman - and was a man of his word to her\nA T'},
 {'generated_text': 'The man worked as a private contractor for five years. He went to the Bahamas in the summer of'},
 {'generated_text': 'The man worked as a computer systems consultant. After leaving the job, he became a prolific internet hacker'}]
```

This bias will also affect all fine-tuned versions of this model.

## Training data

The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents:

- BookCorpus, which consists of more than 10K unpublished books,
- CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas,
- The Pile, from which *Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included,
- the Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al.
(2021),
- CCNewsV2, containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b).

The final training data contains 180B tokens, corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus.

The dataset might contain offensive content, as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety.

### Collection process

The dataset was collected from the internet and went through classic data processing algorithms and re-formatting practices, including the removal of repetitive/non-informative text such as *Chapter One* or *This ebook by Project Gutenberg.*

## Training procedure

### Preprocessing

The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) with a vocabulary size of 50272. The inputs are sequences of 2048 consecutive tokens (a short tokenizer sketch follows the citation block below).

The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly 33 days of continuous training.

### BibTeX entry and citation info

```bibtex
@misc{zhang2022opt,
      title={OPT: Open Pre-trained Transformer Language Models},
      author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer},
      year={2022},
      eprint={2205.01068},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
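As a small illustration of the preprocessing described above (a sketch written for this card, not code from the OPT release), the byte-level BPE tokenizer shipped with the OPT checkpoints can be inspected directly:

```python
from transformers import AutoTokenizer

# load the GPT2-style byte-level BPE tokenizer shipped with the OPT checkpoints
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")

ids = tokenizer("What are we having for dinner?").input_ids
print(len(tokenizer))                        # size of the vocabulary, incl. special tokens
print(ids)                                   # token ids as fed to the model
print(tokenizer.convert_ids_to_tokens(ids))  # the underlying byte-level BPE pieces
```

During pretraining, such token streams were packed into fixed-length sequences of 2048 consecutive tokens, as noted in the Preprocessing section above.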
MLP-KTLim/llama-3-Korean-Bllossom-8B
MLP-KTLim
"2024-08-12T04:01:02Z"
86,964
262
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "en", "ko", "arxiv:2403.10882", "arxiv:2403.11399", "base_model:meta-llama/Meta-Llama-3-8B", "base_model:finetune:meta-llama/Meta-Llama-3-8B", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-04-25T10:16:43Z"
---
base_model:
- meta-llama/Meta-Llama-3-8B
language:
- en
- ko
library_name: transformers
license: llama3
---

<a href="https://github.com/MLP-Lab/Bllossom">
  <img src="https://github.com/teddysum/bllossom/blob/main//bllossom_icon.png?raw=true" width="40%" height="50%">
</a>

# Update!
* ~~[2024.08.09] We updated the model to a Bllossom-8B based on Llama 3.1, with an average performance improvement of about 5% over the previous Llama 3-based Bllossom.~~ (currently under revision)
* [2024.06.18] We updated to the Bllossom ELO model, whose amount of pre-training data was increased to **250GB**. Vocabulary expansion was not applied this time; if you would like to use the previous vocabulary-expanded long-context model, please contact us directly!
* [2024.06.18] The Bllossom ELO model is a newly trained model based on our in-house ELO pre-training. On the [LogicKor](https://github.com/StableFluffy/LogicKor) benchmark it achieved the SOTA score among existing Korean models under 10B parameters.

LogicKor results:

| Model | Math | Reasoning | Writing | Coding | Understanding | Grammar | Single ALL | Multi ALL | Overall |
|:---------:|:-----:|:------:|:-----:|:-----:|:----:|:-----:|:-----:|:-----:|:----:|
| gpt-3.5-turbo-0125 | 7.14 | 7.71 | 8.28 | 5.85 | 9.71 | 6.28 | 7.50 | 7.95 | 7.72 |
| gemini-1.5-pro-preview-0215 | 8.00 | 7.85 | 8.14 | 7.71 | 8.42 | 7.28 | 7.90 | 6.26 | 7.08 |
| llama-3-Korean-Bllossom-8B | 5.43 | 8.29 | 9.0 | 4.43 | 7.57 | 6.86 | 6.93 | 6.93 | 6.93 |

# Bllossom | [Demo]() | [Homepage](https://www.bllossom.ai/) | [Github](https://github.com/MLP-Lab/Bllossom) |

<!-- [Colab code example (GPU)](https://colab.research.google.com/drive/1fBOzUVZ6NRKk_ugeoTbAOokWKqSN47IG?usp=sharing) | -->
<!-- [Colab quantized-model code example (CPU)](https://colab.research.google.com/drive/129ZNVg5R2NPghUEFHKF0BRdxsZxinQcJ?usp=drive_link) -->

```
Our Bllossom team has released Bllossom, a Korean-English bilingual language model!
With support from the Seoultech supercomputing center, the whole model was fully fine-tuned on more than 100GB of Korean text, making it a Korean-reinforced bilingual model!
Were you looking for a model that is good at Korean?
- A first for Korean: vocabulary expansion with more than 30,000 Korean tokens
- Handles roughly 25% longer Korean context than Llama 3
- Korean-English knowledge linking via a Korean-English parallel corpus (pre-training)
- Fine-tuning on data crafted by linguists with Korean culture and language in mind
- Reinforcement learning

All of this is applied together in Bllossom, and commercial use is allowed. Build your own model with it!
It can even be trained on a free Colab GPU, or you can run the quantized model on a CPU: [quantized model](https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B-4bit)

1. Bllossom-8B is a pragmatism-oriented language model built in collaboration with linguists from Seoultech, Teddysum, and the Yonsei language resources lab! We will keep maintaining it with continuous updates, so please use it widely 🙂
2. We also have the ultra-strong Advanced-Bllossom 8B and 70B models, as well as vision-language models! (Contact us individually if you are curious!!)
3. Bllossom was accepted for presentation at NAACL 2024 and LREC-COLING 2024 (oral).
4. We will keep releasing good language models!! Anyone who wants to collaborate on strengthening Korean (especially papers) is always welcome!! Teams that can lend even a few GPUs are welcome to reach out anytime; we will help you build what you want.
```

The Bllossom language model is a Korean-English bilingual language model based on the open-source Llama 3. It enhances the connection of knowledge between Korean and English. It has the following features:

* **Knowledge Linking**: Linking Korean and English knowledge through additional training
* **Vocabulary Expansion**: Expansion of the Korean vocabulary to enhance Korean expressiveness.
* **Instruction Tuning**: Tuning with custom-made instruction-following data specialized for the Korean language and Korean culture
* **Human Feedback**: DPO has been applied
* **Vision-Language Alignment**: Aligning the vision transformer with this language model

**This model was developed by [MLPLab at Seoultech](http://mlp.seoultech.ac.kr), [Teddysum](http://teddysum.ai/) and [Yonsei Univ](https://sites.google.com/view/hansaemkim/hansaem-kim)**

## Demo Video

<div style="display: flex; justify-content: space-between;">
  <!-- first column -->
  <div style="width: 49%;">
    <a>
      <img src="https://github.com/lhsstn/lhsstn/blob/main/x-llava_dem.gif?raw=true" style="width: 100%; height: auto;">
    </a>
    <p style="text-align: center;">Bllossom-V Demo</p>
  </div>
  <!-- second column (if needed) -->
  <div style="width: 49%;">
    <a>
      <img src="https://github.com/lhsstn/lhsstn/blob/main/bllossom_demo_kakao.gif?raw=true" style="width: 70%; height: auto;">
    </a>
    <p style="text-align: center;">Bllossom Demo (Kakao)</p>
  </div>
</div>

# NEWS
* [2024.06.18] We have reverted to the non-vocab-expansion model. However, we have significantly increased the amount of pre-training data to 250GB.
* [2024.05.08] Vocab Expansion Model Update
* [2024.04.25] We released Bllossom v2.0, based on llama-3

## Example code

### Colab Tutorial
- [Inference-Code-Link](https://colab.research.google.com/drive/1fBOzUVZ6NRKk_ugeoTbAOokWKqSN47IG?usp=sharing)

### Install Dependencies
```bash
pip install torch transformers==4.40.0 accelerate
```

### Python code with Pipeline
```python
import transformers
import torch

model_id = "MLP-KTLim/llama-3-Korean-Bllossom-8B"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

pipeline.model.eval()

PROMPT = '''You are a helpful AI assistant. Please answer the user's questions kindly. 당신은 유능한 AI 어시스턴트 입니다. 사용자의 질문에 대해 친절하게 답변해주세요.'''
instruction = "서울의 유명한 관광 코스를 만들어줄래?"

messages = [
    {"role": "system", "content": f"{PROMPT}"},
    {"role": "user", "content": f"{instruction}"}
]

prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = pipeline(
    prompt,
    max_new_tokens=2048,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9
)

print(outputs[0]["generated_text"][len(prompt):])
```

```
# 물론이죠! 서울은 다양한 문화와 역사, 자연을 겸비한 도시로, 많은 관광 명소를 자랑합니다. 여기 서울의 유명한 관광 코스를 소개해 드릴게요.

### 코스 1: 역사와 문화 탐방
1. **경복궁** - 서울의 대표적인 궁궐로, 조선 왕조의 역사와 문화를 체험할 수 있는 곳입니다.
2. **북촌 한옥마을** - 전통 한옥이 잘 보존된 마을로, 조선시대의 생활상을 느낄 수 있습니다.
3. **인사동** - 전통 문화와 현대 예술이 공존하는 거리로, 다양한 갤러리와 전통 음식점이 있습니다.
4. **청계천** - 서울의 중심에 위치한 천문으로, 조깅과 산책을 즐길 수 있는 곳입니다.

### 코스 2: 자연과 쇼핑
1. **남산 서울타워** - 서울의 전경을 한눈에 볼 수 있는 곳으로, 특히 저녁 시간대에 일몰을 감상하는 것이 좋습니다.
2. **명동** - 쇼핑과 음식점이 즐비한 지역으로, 다양한 브랜드와 전통 음식을 맛볼 수 있습니다.
3. **한강공원** - 서울의 주요 공원 중 하나로, 조깅, 자전거 타기, 배낭 여행을 즐길 수 있습니다.
4. **홍대** - 젊은이들이 즐겨 찾는 지역으로, 다양한 카페, 레스토랑, 클럽이 있습니다.

### 코스 3: 현대와 전통의 조화
1. **동대문 디자인 플라자 (DDP)** - 현대적인 건축물로, 다양한 전시와 이벤트가 열리는 곳입니다.
2. **이태원** - 다양한 국제 음식과 카페가 있는 지역으로, 다양한 문화를 경험할 수 있습니다.
3. **광화문** - 서울의 중심에 위치한 광장으로, 다양한 공연과 행사가 열립니다.
4. **서울랜드** - 서울 외곽에 위치한 테마파크로, 가족단위 관광객들에게 인기 있는 곳입니다.

이 코스들은 서울의 다양한 면모를 경험할 수 있도록 구성되어 있습니다. 각 코스마다 시간을 조절하고, 개인의 관심사에 맞게 선택하여 방문하면 좋을 것 같습니다. 즐거운 여행 되세요!
```

### Python code with AutoModel
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = 'MLP-KTLim/llama-3-Korean-Bllossom-8B'

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

model.eval()

PROMPT = '''You are a helpful AI assistant. Please answer the user's questions kindly. 당신은 유능한 AI 어시스턴트 입니다. 사용자의 질문에 대해 친절하게 답변해주세요.'''
instruction = "서울의 유명한 관광 코스를 만들어줄래?"

messages = [
    {"role": "system", "content": f"{PROMPT}"},
    {"role": "user", "content": f"{instruction}"}
]

input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = model.generate(
    input_ids,
    max_new_tokens=2048,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9
)

print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

```
# 물론이죠! 서울은 다양한 문화와 역사, 자연을 겸비한 도시로, 많은 관광 명소를 자랑합니다. 여기 서울의 유명한 관광 코스를 소개해 드릴게요.

### 코스 1: 역사와 문화 탐방
1. **경복궁** - 서울의 대표적인 궁궐로, 조선 왕조의 역사와 문화를 체험할 수 있는 곳입니다.
2. **북촌 한옥마을** - 전통 한옥이 잘 보존된 마을로, 조선시대의 생활상을 느낄 수 있습니다.
3. **인사동** - 전통 문화와 현대 예술이 공존하는 거리로, 다양한 갤러리와 전통 음식점이 있습니다.
4. **청계천** - 서울의 중심에 위치한 천문으로, 조깅과 산책을 즐길 수 있는 곳입니다.

### 코스 2: 자연과 쇼핑
1. **남산 서울타워** - 서울의 전경을 한눈에 볼 수 있는 곳으로, 특히 저녁 시간대에 일몰을 감상하는 것이 좋습니다.
2. **명동** - 쇼핑과 음식점이 즐비한 지역으로, 다양한 브랜드와 전통 음식을 맛볼 수 있습니다.
3. **한강공원** - 서울의 주요 공원 중 하나로, 조깅, 자전거 타기, 배낭 여행을 즐길 수 있습니다.
4. **홍대** - 젊은이들이 즐겨 찾는 지역으로, 다양한 카페, 레스토랑, 클럽이 있습니다.

### 코스 3: 현대와 전통의 조화
1. **동대문 디자인 플라자 (DDP)** - 현대적인 건축물로, 다양한 전시와 이벤트가 열리는 곳입니다.
2. **이태원** - 다양한 국제 음식과 카페가 있는 지역으로, 다양한 문화를 경험할 수 있습니다.
3. **광화문** - 서울의 중심에 위치한 광장으로, 다양한 공연과 행사가 열립니다.
4. **서울랜드** - 서울 외곽에 위치한 테마파크로, 가족단위 관광객들에게 인기 있는 곳입니다.

이 코스들은 서울의 다양한 면모를 경험할 수 있도록 구성되어 있습니다. 각 코스마다 시간을 조절하고, 개인의 관심사에 맞게 선택하여 방문하면 좋을 것 같습니다. 즐거운 여행 되세요!
```

## Citation

**Language Model**
```text
@misc{bllossom,
  author = {ChangSu Choi, Yongbin Jeong, Seoyoon Park, InHo Won, HyeonSeok Lim, SangMin Kim, Yejee Kang, Chanhyuk Yoon, Jaewan Park, Yiseul Lee, HyeJin Lee, Younggyun Hahm, Hansaem Kim, KyungTae Lim},
  title = {Optimizing Language Augmentation for Multilingual Large Language Models: A Case Study on Korean},
  year = {2024},
  journal = {LREC-COLING 2024},
  paperLink = {\url{https://arxiv.org/pdf/2403.10882}}
}
```

**Vision-Language Model**
```text
@misc{bllossom-V,
  author = {Dongjae Shin, Hyunseok Lim, Inho Won, Changsu Choi, Minjun Kim, Seungwoo Song, Hangyeol Yoo, Sangmin Kim, Kyungtae Lim},
  title = {X-LLaVA: Optimizing Bilingual Large Vision-Language Alignment},
  year = {2024},
  publisher = {GitHub},
  journal = {NAACL 2024 findings},
  paperLink = {\url{https://arxiv.org/pdf/2403.11399}}
}
```

## Contact
- 임경태(KyungTae Lim), Professor at Seoultech. `ktlim@seoultech.ac.kr`
- 함영균(Younggyun Hahm), CEO of Teddysum. `hahmyg@teddysum.ai`
- 김한샘(Hansaem Kim), Professor at Yonsei. `khss@yonsei.ac.kr`

## Contributor
- 최창수(Chansu Choi), choics2623@seoultech.ac.kr
- 김상민(Sangmin Kim), sangmin9708@naver.com
- 원인호(Inho Won), wih1226@seoultech.ac.kr
- 김민준(Minjun Kim), mjkmain@seoultech.ac.kr
- 송승우(Seungwoo Song), sswoo@seoultech.ac.kr
- 신동재(Dongjae Shin), dylan1998@seoultech.ac.kr
- 임현석(Hyeonseok Lim), gustjrantk@seoultech.ac.kr
- 육정훈(Jeonghun Yuk), usually670@gmail.com
- 유한결(Hangyeol Yoo), 21102372@seoultech.ac.kr
- 송서현(Seohyun Song), alexalex225225@gmail.com
pierreguillou/ner-bert-base-cased-pt-lenerbr
pierreguillou
"2021-12-29T19:32:39Z"
86,753
15
transformers
[ "transformers", "pytorch", "bert", "token-classification", "generated_from_trainer", "pt", "dataset:lener_br", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
"2022-03-02T23:29:05Z"
---
language:
- pt
tags:
- generated_from_trainer
datasets:
- lener_br
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: checkpoints
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: lener_br
      type: lener_br
    metrics:
    - name: F1
      type: f1
      value: 0.8926146010186757
    - name: Precision
      type: precision
      value: 0.8810222036028488
    - name: Recall
      type: recall
      value: 0.9045161290322581
    - name: Accuracy
      type: accuracy
      value: 0.9759397808828684
    - name: Loss
      type: loss
      value: 0.18803243339061737
widget:
- text: "Ao Instituto Médico Legal da jurisdição do acidente ou da residência cumpre fornecer, no prazo de 90 dias, laudo à vítima (art. 5, § 5, Lei n. 6.194/74 de 19 de dezembro de 1974), função técnica que pode ser suprida por prova pericial realizada por ordem do juízo da causa, ou por prova técnica realizada no âmbito administrativo que se mostre coerente com os demais elementos de prova constante dos autos."
- text: "Acrescento que não há de se falar em violação do artigo 114, § 3º, da Constituição Federal, posto que referido dispositivo revela-se impertinente, tratando da possibilidade de ajuizamento de dissídio coletivo pelo Ministério Público do Trabalho nos casos de greve em atividade essencial."
- text: "Dispõe sobre o estágio de estudantes; altera a redação do art. 428 da Consolidação das Leis do Trabalho – CLT, aprovada pelo Decreto-Lei no 5.452, de 1o de maio de 1943, e a Lei no 9.394, de 20 de dezembro de 1996; revoga as Leis nos 6.494, de 7 de dezembro de 1977, e 8.859, de 23 de março de 1994, o parágrafo único do art. 82 da Lei no 9.394, de 20 de dezembro de 1996, e o art. 6o da Medida Provisória no 2.164-41, de 24 de agosto de 2001; e dá outras providências."
---

## (BERT base) NER model in the legal domain in Portuguese (LeNER-Br)

**ner-bert-base-portuguese-cased-lenerbr** is a NER model (token classification) in the legal domain in Portuguese that was finetuned on 20/12/2021 in Google Colab from the model [pierreguillou/bert-base-cased-pt-lenerbr](https://huggingface.co/pierreguillou/bert-base-cased-pt-lenerbr) on the dataset [LeNER_br](https://huggingface.co/datasets/lener_br) by using a NER objective.

Due to the small size of BERTimbau base and of the finetuning dataset, the model overfitted before reaching the end of training. Here are the overall final metrics on the validation dataset (*note: see the paragraph "Validation metrics by Named Entity" to get detailed metrics*):

- **f1**: 0.8926146010186757
- **precision**: 0.8810222036028488
- **recall**: 0.9045161290322581
- **accuracy**: 0.9759397808828684
- **loss**: 0.18803243339061737

Check as well the [large version of this model](https://huggingface.co/pierreguillou/ner-bert-large-cased-pt-lenerbr) with an f1 of 0.908.

**Note**: the model [pierreguillou/bert-base-cased-pt-lenerbr](https://huggingface.co/pierreguillou/bert-base-cased-pt-lenerbr) is a language model that was created through the finetuning of the model [BERTimbau base](https://huggingface.co/neuralmind/bert-base-portuguese-cased) on the dataset [LeNER-Br language modeling](https://huggingface.co/datasets/pierreguillou/lener_br_finetuning_language_model) by using a MASK objective. This first specialization of the language model before finetuning on the NER task slightly improved the model quality.
To demonstrate this, here are the results of the NER model finetuned from the model [BERTimbau base](https://huggingface.co/neuralmind/bert-base-portuguese-cased) (a non-specialized language model):

- **f1**: 0.8716487228203504
- **precision**: 0.8559286898839138
- **recall**: 0.8879569892473118
- **accuracy**: 0.9755893153732458
- **loss**: 0.1133928969502449

## Blog post

[NLP | Modelos e Web App para Reconhecimento de Entidade Nomeada (NER) no domínio jurídico brasileiro](https://medium.com/@pierre_guillou/nlp-modelos-e-web-app-para-reconhecimento-de-entidade-nomeada-ner-no-dom%C3%ADnio-jur%C3%ADdico-b658db55edfb) (29/12/2021)

## Widget & App

You can test this model in the widget on this page. You can also use the [NER App](https://huggingface.co/spaces/pierreguillou/ner-bert-pt-lenerbr), which allows comparing the two BERT models (base and large) fine-tuned on the NER task with the legal LeNER-Br dataset.

## Using the model for inference in production

````
# install pytorch: check https://pytorch.org/
# !pip install transformers
from transformers import AutoModelForTokenClassification, AutoTokenizer
import torch

# parameters
model_name = "pierreguillou/ner-bert-base-cased-pt-lenerbr"
model = AutoModelForTokenClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

input_text = "Acrescento que não há de se falar em violação do artigo 114, § 3º, da Constituição Federal, posto que referido dispositivo revela-se impertinente, tratando da possibilidade de ajuizamento de dissídio coletivo pelo Ministério Público do Trabalho nos casos de greve em atividade essencial."

# tokenization
inputs = tokenizer(input_text, max_length=512, truncation=True, return_tensors="pt")
tokens = inputs.tokens()

# get predictions
outputs = model(**inputs).logits
predictions = torch.argmax(outputs, dim=2)

# print predictions
for token, prediction in zip(tokens, predictions[0].numpy()):
    print((token, model.config.id2label[prediction]))
````

You can use the pipeline, too. However, it seems to have an issue regarding the max_length of the input sequence (see the workaround sketch at the end of this card).

````
!pip install transformers
import transformers
from transformers import pipeline

model_name = "pierreguillou/ner-bert-base-cased-pt-lenerbr"

ner = pipeline(
    "ner",
    model=model_name
)

ner(input_text)
````

## Training procedure

### Notebook

The finetuning notebook ([HuggingFace_Notebook_token_classification_NER_LeNER_Br.ipynb](https://github.com/piegu/language-models/blob/master/HuggingFace_Notebook_token_classification_NER_LeNER_Br.ipynb)) is on GitHub.

### Hyperparameters

#### batch, learning rate...
- per_device_batch_size = 2
- gradient_accumulation_steps = 2
- learning_rate = 2e-5
- num_train_epochs = 10
- weight_decay = 0.01
- optimizer = AdamW
- betas = (0.9,0.999)
- epsilon = 1e-08
- lr_scheduler_type = linear
- seed = 7

#### save model & load best model
- save_total_limit = 2
- logging_steps = 300
- eval_steps = logging_steps
- evaluation_strategy = 'steps'
- logging_strategy = 'steps'
- save_strategy = 'steps'
- save_steps = logging_steps
- load_best_model_at_end = True
- fp16 = True

#### get best model through a metric
- metric_for_best_model = 'eval_f1'
- greater_is_better = True

### Training results

````
Num examples = 7828
Num Epochs = 10
Instantaneous batch size per device = 2
Total train batch size (w. parallel, distributed & accumulation) = 4
Gradient Accumulation steps = 2
Total optimization steps = 19570

Step  Training Loss  Validation Loss  Precision  Recall  F1  Accuracy
300  0.127600  0.178613  0.722909  0.741720  0.732194  0.948802
600  0.088200  0.136965  0.733636  0.867742  0.795074  0.963079
900  0.078000  0.128858  0.791912  0.838065  0.814335  0.965243
1200  0.077800  0.126345  0.815400  0.865376  0.839645  0.967849
1500  0.074100  0.148207  0.779274  0.895914  0.833533  0.960184
1800  0.059500  0.116634  0.830829  0.868172  0.849090  0.969342
2100  0.044500  0.208459  0.887150  0.816559  0.850392  0.960535
2400  0.029400  0.136352  0.867821  0.851398  0.859531  0.970271
2700  0.025000  0.165837  0.814881  0.878495  0.845493  0.961235
3000  0.038400  0.120629  0.811719  0.893763  0.850768  0.971506
3300  0.026200  0.175094  0.823435  0.882581  0.851983  0.962957
3600  0.025600  0.178438  0.881095  0.886022  0.883551  0.963689
3900  0.041000  0.134648  0.789035  0.916129  0.847846  0.967681
4200  0.026700  0.130178  0.821275  0.903226  0.860303  0.972313
4500  0.018500  0.139294  0.844016  0.875054  0.859255  0.971140
4800  0.020800  0.197811  0.892504  0.873118  0.882705  0.965883
5100  0.019300  0.161239  0.848746  0.888172  0.868012  0.967849
5400  0.024000  0.139131  0.837507  0.913333  0.873778  0.970591
5700  0.018400  0.157223  0.899754  0.864731  0.881895  0.970210
6000  0.023500  0.137022  0.883018  0.873333  0.878149  0.973243
6300  0.009300  0.181448  0.840490  0.900860  0.869628  0.968290
6600  0.019200  0.173125  0.821316  0.896559  0.857290  0.966736
6900  0.016100  0.143160  0.789938  0.904946  0.843540  0.968245
7200  0.017000  0.145755  0.823274  0.897634  0.858848  0.969037
7500  0.012100  0.159342  0.825694  0.883226  0.853491  0.967468
7800  0.013800  0.194886  0.861237  0.859570  0.860403  0.964771
8100  0.008000  0.140271  0.829914  0.896129  0.861752  0.971567
8400  0.010300  0.143318  0.826844  0.908817  0.865895  0.973578
8700  0.015000  0.143392  0.847336  0.889247  0.867786  0.973365
9000  0.006000  0.143512  0.847795  0.905591  0.875741  0.972892
9300  0.011800  0.138747  0.827133  0.894194  0.859357  0.971673
9600  0.008500  0.159490  0.837030  0.909032  0.871546  0.970028
9900  0.010700  0.159249  0.846692  0.910968  0.877655  0.970546
10200  0.008100  0.170069  0.848288  0.900645  0.873683  0.969113
10500  0.004800  0.183795  0.860317  0.899355  0.879403  0.969570
10800  0.010700  0.157024  0.837838  0.906667  0.870894  0.971094
11100  0.003800  0.164286  0.845312  0.880215  0.862410  0.970744
11400  0.009700  0.204025  0.884294  0.887527  0.885907  0.968854
11700  0.008900  0.162819  0.829415  0.887742  0.857588  0.970530
12000  0.006400  0.164296  0.852666  0.901075  0.876202  0.971414
12300  0.007100  0.143367  0.852959  0.895699  0.873807  0.973669
12600  0.015800  0.153383  0.859224  0.900430  0.879345  0.972679
12900  0.006600  0.173447  0.869954  0.899140  0.884306  0.970927
13200  0.006800  0.163234  0.856849  0.897204  0.876563  0.971795
13500  0.003200  0.167164  0.850867  0.907957  0.878485  0.971231
13800  0.003600  0.148950  0.867801  0.910538  0.888656  0.976961
14100  0.003500  0.155691  0.847621  0.907957  0.876752  0.974127
14400  0.003300  0.157672  0.846553  0.911183  0.877680  0.974584
14700  0.002500  0.169965  0.847804  0.917634  0.881338  0.973045
15000  0.003400  0.177099  0.842199  0.912473  0.875929  0.971155
15300  0.006000  0.164151  0.848928  0.911183  0.878954  0.973258
15600  0.002400  0.174305  0.847437  0.906667  0.876052  0.971765
15900  0.004100  0.174561  0.852929  0.907957  0.879583  0.972907
16200  0.002600  0.172626  0.843263  0.907097  0.874016  0.972100
16500  0.002100  0.185302  0.841108  0.907312  0.872957  0.970485
16800  0.002900  0.175638  0.840557  0.909247  0.873554  0.971704
17100  0.001600  0.178750  0.857056  0.906452  0.881062  0.971765
17400  0.003900  0.188910  0.853619  0.907957  0.879950  0.970835
17700  0.002700  0.180822  0.864699  0.907097  0.885390  0.972283
18000  0.001300  0.179974  0.868150  0.906237  0.886785  0.973060
18300  0.000800  0.188032  0.881022  0.904516  0.892615  0.972572
18600  0.002700  0.183266  0.868601  0.901290  0.884644  0.972298
18900  0.001600  0.180301  0.862041  0.903011  0.882050  0.972344
19200  0.002300  0.183432  0.855370  0.904301  0.879155  0.971109
19500  0.001800  0.183381  0.854501  0.904301  0.878696  0.971186
````

### Validation metrics by Named Entity

````
Num examples = 1177

{'JURISPRUDENCIA': {'f1': 0.7016574585635359,
                    'number': 657,
                    'precision': 0.6422250316055625,
                    'recall': 0.7732115677321156},
 'LEGISLACAO': {'f1': 0.8839681133746677,
                'number': 571,
                'precision': 0.8942652329749103,
                'recall': 0.8739054290718039},
 'LOCAL': {'f1': 0.8253968253968254,
           'number': 194,
           'precision': 0.7368421052631579,
           'recall': 0.9381443298969072},
 'ORGANIZACAO': {'f1': 0.8934049079754601,
                 'number': 1340,
                 'precision': 0.918769716088328,
                 'recall': 0.8694029850746269},
 'PESSOA': {'f1': 0.982653539615565,
            'number': 1072,
            'precision': 0.9877474081055608,
            'recall': 0.9776119402985075},
 'TEMPO': {'f1': 0.9657657657657657,
           'number': 816,
           'precision': 0.9469964664310954,
           'recall': 0.9852941176470589},
 'overall_accuracy': 0.9725722644643211,
 'overall_f1': 0.8926146010186757,
 'overall_precision': 0.8810222036028488,
 'overall_recall': 0.9045161290322581}
````
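### Possible workaround for long inputs

As a possible workaround for the pipeline max_length issue mentioned above, long documents can be split into smaller pieces before calling the pipeline. This is a sketch written for this card, not the author's code; the naive sentence-level split and the `aggregation_strategy` option (available in recent transformers versions) are illustrative assumptions:

````
from transformers import pipeline

model_name = "pierreguillou/ner-bert-base-cased-pt-lenerbr"

# aggregation_strategy groups subword predictions into whole entities
ner = pipeline("ner", model=model_name, aggregation_strategy="simple")

long_text = "..."  # a document longer than the 512-token limit

# naive chunking on sentence boundaries; a production splitter should count tokens
chunks = [c.strip() for c in long_text.split(".") if c.strip()]
entities = [entity for chunk in chunks for entity in ner(chunk)]
print(entities)
````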
facebook/hubert-base-ls960
facebook
"2021-11-05T12:43:12Z"
86,746
44
transformers
[ "transformers", "pytorch", "tf", "hubert", "feature-extraction", "speech", "en", "dataset:librispeech_asr", "arxiv:2106.07447", "license:apache-2.0", "endpoints_compatible", "region:us" ]
feature-extraction
"2022-03-02T23:29:05Z"
---
language: en
datasets:
- librispeech_asr
tags:
- speech
license: apache-2.0
---

# Hubert-Base

[Facebook's Hubert](https://ai.facebook.com/blog/hubert-self-supervised-representation-learning-for-speech-recognition-generation-and-compression)

The base model pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz.

**Note**: This model does not have a tokenizer as it was pretrained on audio alone. In order to use this model for **speech recognition**, a tokenizer should be created and the model should be fine-tuned on labeled text data. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for a more detailed explanation of how to fine-tune the model.

[Paper](https://arxiv.org/abs/2106.07447)

Authors: Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed

**Abstract**

Self-supervised approaches for speech representation learning are challenged by three unique problems: (1) there are multiple sound units in each input utterance, (2) there is no lexicon of input sound units during the pre-training phase, and (3) sound units have variable lengths with no explicit segmentation. To deal with these three problems, we propose the Hidden-Unit BERT (HuBERT) approach for self-supervised speech representation learning, which utilizes an offline clustering step to provide aligned target labels for a BERT-like prediction loss. A key ingredient of our approach is applying the prediction loss over the masked regions only, which forces the model to learn a combined acoustic and language model over the continuous inputs. HuBERT relies primarily on the consistency of the unsupervised clustering step rather than the intrinsic quality of the assigned cluster labels. Starting with a simple k-means teacher of 100 clusters, and using two iterations of clustering, the HuBERT model either matches or improves upon the state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-light (60,000h) benchmarks with 10min, 1h, 10h, 100h, and 960h fine-tuning subsets. Using a 1B parameter model, HuBERT shows up to 19% and 13% relative WER reduction on the more challenging dev-other and test-other evaluation subsets.

The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/hubert.

# Usage

See [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for more information on how to fine-tune the model. Note that the class `Wav2Vec2ForCTC` has to be replaced by `HubertForCTC`.
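For plain feature extraction (the task this checkpoint is published for), a minimal sketch follows. It was written for this card rather than taken from the HuBERT release, and it assumes the checkpoint ships a Wav2Vec2-style feature-extractor config; one second of silence stands in for real 16kHz speech:

```python
import numpy as np
import torch
from transformers import AutoFeatureExtractor, HubertModel

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/hubert-base-ls960")
model = HubertModel.from_pretrained("facebook/hubert-base-ls960")
model.eval()

# one second of silence at 16 kHz stands in for a real waveform
waveform = np.zeros(16000, dtype=np.float32)

inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state

print(hidden_states.shape)  # (batch, frames, hidden_size); hidden_size is 768 for the base model
```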
jinaai/jina-embeddings-v2-base-en
jinaai
"2024-08-06T14:40:36Z"
86,601
690
sentence-transformers
[ "sentence-transformers", "pytorch", "coreml", "onnx", "safetensors", "bert", "feature-extraction", "sentence-similarity", "mteb", "custom_code", "en", "dataset:allenai/c4", "arxiv:2108.12409", "arxiv:2310.19923", "license:apache-2.0", "model-index", "autotrain_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
"2023-09-27T17:04:00Z"
--- tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb datasets: - allenai/c4 language: en inference: false license: apache-2.0 model-index: - name: jina-embedding-b-en-v2 results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 74.73134328358209 - type: ap value: 37.765427081831035 - type: f1 value: 68.79367444339518 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 88.544275 - type: ap value: 84.61328675662887 - type: f1 value: 88.51879035862375 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 45.263999999999996 - type: f1 value: 43.778759656699435 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 21.693 - type: map_at_10 value: 35.487 - type: map_at_100 value: 36.862 - type: map_at_1000 value: 36.872 - type: map_at_3 value: 30.049999999999997 - type: map_at_5 value: 32.966 - type: mrr_at_1 value: 21.977 - type: mrr_at_10 value: 35.565999999999995 - type: mrr_at_100 value: 36.948 - type: mrr_at_1000 value: 36.958 - type: mrr_at_3 value: 30.121 - type: mrr_at_5 value: 33.051 - type: ndcg_at_1 value: 21.693 - type: ndcg_at_10 value: 44.181 - type: ndcg_at_100 value: 49.982 - type: ndcg_at_1000 value: 50.233000000000004 - type: ndcg_at_3 value: 32.830999999999996 - type: ndcg_at_5 value: 38.080000000000005 - type: precision_at_1 value: 21.693 - type: precision_at_10 value: 7.248 - type: precision_at_100 value: 0.9769999999999999 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 13.632 - type: precision_at_5 value: 10.725 - type: recall_at_1 value: 21.693 - type: recall_at_10 value: 72.475 - type: recall_at_100 value: 97.653 - type: recall_at_1000 value: 99.57300000000001 - type: recall_at_3 value: 40.896 - type: recall_at_5 value: 53.627 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 45.39242428696777 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 36.675626784714 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 62.247725694904034 - type: mrr value: 74.91359978894604 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 82.68003802970496 - type: cos_sim_spearman value: 81.23438110096286 - type: euclidean_pearson value: 81.87462986142582 - type: euclidean_spearman value: 81.23438110096286 - type: manhattan_pearson value: 81.61162566600755 - type: manhattan_spearman value: 81.11329400456184 - task: type: Classification dataset: type: 
mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 84.01298701298701 - type: f1 value: 83.31690714969382 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 37.050108150972086 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 30.15731442819715 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 31.391999999999996 - type: map_at_10 value: 42.597 - type: map_at_100 value: 44.07 - type: map_at_1000 value: 44.198 - type: map_at_3 value: 38.957 - type: map_at_5 value: 40.961 - type: mrr_at_1 value: 37.196 - type: mrr_at_10 value: 48.152 - type: mrr_at_100 value: 48.928 - type: mrr_at_1000 value: 48.964999999999996 - type: mrr_at_3 value: 45.446 - type: mrr_at_5 value: 47.205999999999996 - type: ndcg_at_1 value: 37.196 - type: ndcg_at_10 value: 49.089 - type: ndcg_at_100 value: 54.471000000000004 - type: ndcg_at_1000 value: 56.385 - type: ndcg_at_3 value: 43.699 - type: ndcg_at_5 value: 46.22 - type: precision_at_1 value: 37.196 - type: precision_at_10 value: 9.313 - type: precision_at_100 value: 1.478 - type: precision_at_1000 value: 0.198 - type: precision_at_3 value: 20.839 - type: precision_at_5 value: 14.936 - type: recall_at_1 value: 31.391999999999996 - type: recall_at_10 value: 61.876 - type: recall_at_100 value: 84.214 - type: recall_at_1000 value: 95.985 - type: recall_at_3 value: 46.6 - type: recall_at_5 value: 53.588 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 29.083 - type: map_at_10 value: 38.812999999999995 - type: map_at_100 value: 40.053 - type: map_at_1000 value: 40.188 - type: map_at_3 value: 36.111 - type: map_at_5 value: 37.519000000000005 - type: mrr_at_1 value: 36.497 - type: mrr_at_10 value: 44.85 - type: mrr_at_100 value: 45.546 - type: mrr_at_1000 value: 45.593 - type: mrr_at_3 value: 42.686 - type: mrr_at_5 value: 43.909 - type: ndcg_at_1 value: 36.497 - type: ndcg_at_10 value: 44.443 - type: ndcg_at_100 value: 48.979 - type: ndcg_at_1000 value: 51.154999999999994 - type: ndcg_at_3 value: 40.660000000000004 - type: ndcg_at_5 value: 42.193000000000005 - type: precision_at_1 value: 36.497 - type: precision_at_10 value: 8.433 - type: precision_at_100 value: 1.369 - type: precision_at_1000 value: 0.185 - type: precision_at_3 value: 19.894000000000002 - type: precision_at_5 value: 13.873 - type: recall_at_1 value: 29.083 - type: recall_at_10 value: 54.313 - type: recall_at_100 value: 73.792 - type: recall_at_1000 value: 87.629 - type: recall_at_3 value: 42.257 - type: recall_at_5 value: 47.066 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 38.556000000000004 - type: map_at_10 value: 50.698 - type: map_at_100 value: 51.705 - type: map_at_1000 value: 51.768 - type: map_at_3 value: 47.848 - type: map_at_5 value: 49.358000000000004 - type: mrr_at_1 value: 43.95 - type: mrr_at_10 value: 
54.191 - type: mrr_at_100 value: 54.852999999999994 - type: mrr_at_1000 value: 54.885 - type: mrr_at_3 value: 51.954 - type: mrr_at_5 value: 53.13 - type: ndcg_at_1 value: 43.95 - type: ndcg_at_10 value: 56.516 - type: ndcg_at_100 value: 60.477000000000004 - type: ndcg_at_1000 value: 61.746 - type: ndcg_at_3 value: 51.601 - type: ndcg_at_5 value: 53.795 - type: precision_at_1 value: 43.95 - type: precision_at_10 value: 9.009 - type: precision_at_100 value: 1.189 - type: precision_at_1000 value: 0.135 - type: precision_at_3 value: 22.989 - type: precision_at_5 value: 15.473 - type: recall_at_1 value: 38.556000000000004 - type: recall_at_10 value: 70.159 - type: recall_at_100 value: 87.132 - type: recall_at_1000 value: 96.16 - type: recall_at_3 value: 56.906 - type: recall_at_5 value: 62.332 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.238 - type: map_at_10 value: 32.5 - type: map_at_100 value: 33.637 - type: map_at_1000 value: 33.719 - type: map_at_3 value: 30.026999999999997 - type: map_at_5 value: 31.555 - type: mrr_at_1 value: 26.328000000000003 - type: mrr_at_10 value: 34.44 - type: mrr_at_100 value: 35.455999999999996 - type: mrr_at_1000 value: 35.521 - type: mrr_at_3 value: 32.034 - type: mrr_at_5 value: 33.565 - type: ndcg_at_1 value: 26.328000000000003 - type: ndcg_at_10 value: 37.202 - type: ndcg_at_100 value: 42.728 - type: ndcg_at_1000 value: 44.792 - type: ndcg_at_3 value: 32.368 - type: ndcg_at_5 value: 35.008 - type: precision_at_1 value: 26.328000000000003 - type: precision_at_10 value: 5.7059999999999995 - type: precision_at_100 value: 0.8880000000000001 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 13.672 - type: precision_at_5 value: 9.74 - type: recall_at_1 value: 24.238 - type: recall_at_10 value: 49.829 - type: recall_at_100 value: 75.21 - type: recall_at_1000 value: 90.521 - type: recall_at_3 value: 36.867 - type: recall_at_5 value: 43.241 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 15.378 - type: map_at_10 value: 22.817999999999998 - type: map_at_100 value: 23.977999999999998 - type: map_at_1000 value: 24.108 - type: map_at_3 value: 20.719 - type: map_at_5 value: 21.889 - type: mrr_at_1 value: 19.03 - type: mrr_at_10 value: 27.022000000000002 - type: mrr_at_100 value: 28.011999999999997 - type: mrr_at_1000 value: 28.096 - type: mrr_at_3 value: 24.855 - type: mrr_at_5 value: 26.029999999999998 - type: ndcg_at_1 value: 19.03 - type: ndcg_at_10 value: 27.526 - type: ndcg_at_100 value: 33.040000000000006 - type: ndcg_at_1000 value: 36.187000000000005 - type: ndcg_at_3 value: 23.497 - type: ndcg_at_5 value: 25.334 - type: precision_at_1 value: 19.03 - type: precision_at_10 value: 4.963 - type: precision_at_100 value: 0.893 - type: precision_at_1000 value: 0.13 - type: precision_at_3 value: 11.360000000000001 - type: precision_at_5 value: 8.134 - type: recall_at_1 value: 15.378 - type: recall_at_10 value: 38.061 - type: recall_at_100 value: 61.754 - type: recall_at_1000 value: 84.259 - type: recall_at_3 value: 26.788 - type: recall_at_5 value: 31.326999999999998 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.511999999999997 - type: map_at_10 value: 37.429 - 
type: map_at_100 value: 38.818000000000005 - type: map_at_1000 value: 38.924 - type: map_at_3 value: 34.625 - type: map_at_5 value: 36.064 - type: mrr_at_1 value: 33.300999999999995 - type: mrr_at_10 value: 43.036 - type: mrr_at_100 value: 43.894 - type: mrr_at_1000 value: 43.936 - type: mrr_at_3 value: 40.825 - type: mrr_at_5 value: 42.028 - type: ndcg_at_1 value: 33.300999999999995 - type: ndcg_at_10 value: 43.229 - type: ndcg_at_100 value: 48.992000000000004 - type: ndcg_at_1000 value: 51.02100000000001 - type: ndcg_at_3 value: 38.794000000000004 - type: ndcg_at_5 value: 40.65 - type: precision_at_1 value: 33.300999999999995 - type: precision_at_10 value: 7.777000000000001 - type: precision_at_100 value: 1.269 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 18.351 - type: precision_at_5 value: 12.762 - type: recall_at_1 value: 27.511999999999997 - type: recall_at_10 value: 54.788000000000004 - type: recall_at_100 value: 79.105 - type: recall_at_1000 value: 92.49199999999999 - type: recall_at_3 value: 41.924 - type: recall_at_5 value: 47.026 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.117 - type: map_at_10 value: 33.32 - type: map_at_100 value: 34.677 - type: map_at_1000 value: 34.78 - type: map_at_3 value: 30.233999999999998 - type: map_at_5 value: 31.668000000000003 - type: mrr_at_1 value: 29.566 - type: mrr_at_10 value: 38.244 - type: mrr_at_100 value: 39.245000000000005 - type: mrr_at_1000 value: 39.296 - type: mrr_at_3 value: 35.864000000000004 - type: mrr_at_5 value: 36.919999999999995 - type: ndcg_at_1 value: 29.566 - type: ndcg_at_10 value: 39.127 - type: ndcg_at_100 value: 44.989000000000004 - type: ndcg_at_1000 value: 47.189 - type: ndcg_at_3 value: 34.039 - type: ndcg_at_5 value: 35.744 - type: precision_at_1 value: 29.566 - type: precision_at_10 value: 7.385999999999999 - type: precision_at_100 value: 1.204 - type: precision_at_1000 value: 0.158 - type: precision_at_3 value: 16.286 - type: precision_at_5 value: 11.484 - type: recall_at_1 value: 24.117 - type: recall_at_10 value: 51.559999999999995 - type: recall_at_100 value: 77.104 - type: recall_at_1000 value: 91.79899999999999 - type: recall_at_3 value: 36.82 - type: recall_at_5 value: 41.453 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 25.17625 - type: map_at_10 value: 34.063916666666664 - type: map_at_100 value: 35.255500000000005 - type: map_at_1000 value: 35.37275 - type: map_at_3 value: 31.351666666666667 - type: map_at_5 value: 32.80608333333333 - type: mrr_at_1 value: 29.59783333333333 - type: mrr_at_10 value: 38.0925 - type: mrr_at_100 value: 38.957249999999995 - type: mrr_at_1000 value: 39.01608333333333 - type: mrr_at_3 value: 35.77625 - type: mrr_at_5 value: 37.04991666666667 - type: ndcg_at_1 value: 29.59783333333333 - type: ndcg_at_10 value: 39.343666666666664 - type: ndcg_at_100 value: 44.488249999999994 - type: ndcg_at_1000 value: 46.83358333333334 - type: ndcg_at_3 value: 34.69708333333333 - type: ndcg_at_5 value: 36.75075 - type: precision_at_1 value: 29.59783333333333 - type: precision_at_10 value: 6.884083333333332 - type: precision_at_100 value: 1.114 - type: precision_at_1000 value: 0.15108333333333332 - type: precision_at_3 value: 15.965250000000003 - type: precision_at_5 value: 11.246500000000001 - type: recall_at_1 value: 25.17625 
- type: recall_at_10 value: 51.015999999999984 - type: recall_at_100 value: 73.60174999999998 - type: recall_at_1000 value: 89.849 - type: recall_at_3 value: 37.88399999999999 - type: recall_at_5 value: 43.24541666666666 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.537 - type: map_at_10 value: 31.081999999999997 - type: map_at_100 value: 32.042 - type: map_at_1000 value: 32.141 - type: map_at_3 value: 29.137 - type: map_at_5 value: 30.079 - type: mrr_at_1 value: 27.454 - type: mrr_at_10 value: 33.694 - type: mrr_at_100 value: 34.579 - type: mrr_at_1000 value: 34.649 - type: mrr_at_3 value: 32.004 - type: mrr_at_5 value: 32.794000000000004 - type: ndcg_at_1 value: 27.454 - type: ndcg_at_10 value: 34.915 - type: ndcg_at_100 value: 39.641 - type: ndcg_at_1000 value: 42.105 - type: ndcg_at_3 value: 31.276 - type: ndcg_at_5 value: 32.65 - type: precision_at_1 value: 27.454 - type: precision_at_10 value: 5.337 - type: precision_at_100 value: 0.8250000000000001 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 13.241 - type: precision_at_5 value: 8.895999999999999 - type: recall_at_1 value: 24.537 - type: recall_at_10 value: 44.324999999999996 - type: recall_at_100 value: 65.949 - type: recall_at_1000 value: 84.017 - type: recall_at_3 value: 33.857 - type: recall_at_5 value: 37.316 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 17.122 - type: map_at_10 value: 24.32 - type: map_at_100 value: 25.338 - type: map_at_1000 value: 25.462 - type: map_at_3 value: 22.064 - type: map_at_5 value: 23.322000000000003 - type: mrr_at_1 value: 20.647 - type: mrr_at_10 value: 27.858 - type: mrr_at_100 value: 28.743999999999996 - type: mrr_at_1000 value: 28.819 - type: mrr_at_3 value: 25.769 - type: mrr_at_5 value: 26.964 - type: ndcg_at_1 value: 20.647 - type: ndcg_at_10 value: 28.849999999999998 - type: ndcg_at_100 value: 33.849000000000004 - type: ndcg_at_1000 value: 36.802 - type: ndcg_at_3 value: 24.799 - type: ndcg_at_5 value: 26.682 - type: precision_at_1 value: 20.647 - type: precision_at_10 value: 5.2170000000000005 - type: precision_at_100 value: 0.906 - type: precision_at_1000 value: 0.134 - type: precision_at_3 value: 11.769 - type: precision_at_5 value: 8.486 - type: recall_at_1 value: 17.122 - type: recall_at_10 value: 38.999 - type: recall_at_100 value: 61.467000000000006 - type: recall_at_1000 value: 82.716 - type: recall_at_3 value: 27.601 - type: recall_at_5 value: 32.471 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.396 - type: map_at_10 value: 33.415 - type: map_at_100 value: 34.521 - type: map_at_1000 value: 34.631 - type: map_at_3 value: 30.703999999999997 - type: map_at_5 value: 32.166 - type: mrr_at_1 value: 28.825 - type: mrr_at_10 value: 37.397000000000006 - type: mrr_at_100 value: 38.286 - type: mrr_at_1000 value: 38.346000000000004 - type: mrr_at_3 value: 35.028 - type: mrr_at_5 value: 36.32 - type: ndcg_at_1 value: 28.825 - type: ndcg_at_10 value: 38.656 - type: ndcg_at_100 value: 43.856 - type: ndcg_at_1000 value: 46.31 - type: ndcg_at_3 value: 33.793 - type: ndcg_at_5 value: 35.909 - type: precision_at_1 value: 28.825 - type: precision_at_10 value: 6.567 - type: precision_at_100 value: 
1.0330000000000001 - type: precision_at_1000 value: 0.135 - type: precision_at_3 value: 15.516 - type: precision_at_5 value: 10.914 - type: recall_at_1 value: 24.396 - type: recall_at_10 value: 50.747 - type: recall_at_100 value: 73.477 - type: recall_at_1000 value: 90.801 - type: recall_at_3 value: 37.1 - type: recall_at_5 value: 42.589 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 25.072 - type: map_at_10 value: 34.307 - type: map_at_100 value: 35.725 - type: map_at_1000 value: 35.943999999999996 - type: map_at_3 value: 30.906 - type: map_at_5 value: 32.818000000000005 - type: mrr_at_1 value: 29.644 - type: mrr_at_10 value: 38.673 - type: mrr_at_100 value: 39.459 - type: mrr_at_1000 value: 39.527 - type: mrr_at_3 value: 35.771 - type: mrr_at_5 value: 37.332 - type: ndcg_at_1 value: 29.644 - type: ndcg_at_10 value: 40.548 - type: ndcg_at_100 value: 45.678999999999995 - type: ndcg_at_1000 value: 48.488 - type: ndcg_at_3 value: 34.887 - type: ndcg_at_5 value: 37.543 - type: precision_at_1 value: 29.644 - type: precision_at_10 value: 7.688000000000001 - type: precision_at_100 value: 1.482 - type: precision_at_1000 value: 0.23600000000000002 - type: precision_at_3 value: 16.206 - type: precision_at_5 value: 12.016 - type: recall_at_1 value: 25.072 - type: recall_at_10 value: 53.478 - type: recall_at_100 value: 76.07300000000001 - type: recall_at_1000 value: 93.884 - type: recall_at_3 value: 37.583 - type: recall_at_5 value: 44.464 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 20.712 - type: map_at_10 value: 27.467999999999996 - type: map_at_100 value: 28.502 - type: map_at_1000 value: 28.610000000000003 - type: map_at_3 value: 24.887999999999998 - type: map_at_5 value: 26.273999999999997 - type: mrr_at_1 value: 22.736 - type: mrr_at_10 value: 29.553 - type: mrr_at_100 value: 30.485 - type: mrr_at_1000 value: 30.56 - type: mrr_at_3 value: 27.078999999999997 - type: mrr_at_5 value: 28.401 - type: ndcg_at_1 value: 22.736 - type: ndcg_at_10 value: 32.023 - type: ndcg_at_100 value: 37.158 - type: ndcg_at_1000 value: 39.823 - type: ndcg_at_3 value: 26.951999999999998 - type: ndcg_at_5 value: 29.281000000000002 - type: precision_at_1 value: 22.736 - type: precision_at_10 value: 5.213 - type: precision_at_100 value: 0.832 - type: precision_at_1000 value: 0.116 - type: precision_at_3 value: 11.459999999999999 - type: precision_at_5 value: 8.244 - type: recall_at_1 value: 20.712 - type: recall_at_10 value: 44.057 - type: recall_at_100 value: 67.944 - type: recall_at_1000 value: 87.925 - type: recall_at_3 value: 30.305 - type: recall_at_5 value: 36.071999999999996 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 10.181999999999999 - type: map_at_10 value: 16.66 - type: map_at_100 value: 18.273 - type: map_at_1000 value: 18.45 - type: map_at_3 value: 14.141 - type: map_at_5 value: 15.455 - type: mrr_at_1 value: 22.15 - type: mrr_at_10 value: 32.062000000000005 - type: mrr_at_100 value: 33.116 - type: mrr_at_1000 value: 33.168 - type: mrr_at_3 value: 28.827 - type: mrr_at_5 value: 30.892999999999997 - type: ndcg_at_1 value: 22.15 - type: ndcg_at_10 value: 23.532 - type: ndcg_at_100 value: 30.358 - type: ndcg_at_1000 value: 33.783 - type: ndcg_at_3 
value: 19.222 - type: ndcg_at_5 value: 20.919999999999998 - type: precision_at_1 value: 22.15 - type: precision_at_10 value: 7.185999999999999 - type: precision_at_100 value: 1.433 - type: precision_at_1000 value: 0.207 - type: precision_at_3 value: 13.941 - type: precision_at_5 value: 10.906 - type: recall_at_1 value: 10.181999999999999 - type: recall_at_10 value: 28.104000000000003 - type: recall_at_100 value: 51.998999999999995 - type: recall_at_1000 value: 71.311 - type: recall_at_3 value: 17.698 - type: recall_at_5 value: 22.262999999999998 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 6.669 - type: map_at_10 value: 15.552 - type: map_at_100 value: 21.865000000000002 - type: map_at_1000 value: 23.268 - type: map_at_3 value: 11.309 - type: map_at_5 value: 13.084000000000001 - type: mrr_at_1 value: 55.50000000000001 - type: mrr_at_10 value: 66.46600000000001 - type: mrr_at_100 value: 66.944 - type: mrr_at_1000 value: 66.956 - type: mrr_at_3 value: 64.542 - type: mrr_at_5 value: 65.717 - type: ndcg_at_1 value: 44.75 - type: ndcg_at_10 value: 35.049 - type: ndcg_at_100 value: 39.073 - type: ndcg_at_1000 value: 46.208 - type: ndcg_at_3 value: 39.525 - type: ndcg_at_5 value: 37.156 - type: precision_at_1 value: 55.50000000000001 - type: precision_at_10 value: 27.800000000000004 - type: precision_at_100 value: 9.013 - type: precision_at_1000 value: 1.8800000000000001 - type: precision_at_3 value: 42.667 - type: precision_at_5 value: 36.0 - type: recall_at_1 value: 6.669 - type: recall_at_10 value: 21.811 - type: recall_at_100 value: 45.112 - type: recall_at_1000 value: 67.806 - type: recall_at_3 value: 13.373 - type: recall_at_5 value: 16.615 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 48.769999999999996 - type: f1 value: 42.91448356376592 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 54.013 - type: map_at_10 value: 66.239 - type: map_at_100 value: 66.62599999999999 - type: map_at_1000 value: 66.644 - type: map_at_3 value: 63.965 - type: map_at_5 value: 65.45400000000001 - type: mrr_at_1 value: 58.221000000000004 - type: mrr_at_10 value: 70.43700000000001 - type: mrr_at_100 value: 70.744 - type: mrr_at_1000 value: 70.75099999999999 - type: mrr_at_3 value: 68.284 - type: mrr_at_5 value: 69.721 - type: ndcg_at_1 value: 58.221000000000004 - type: ndcg_at_10 value: 72.327 - type: ndcg_at_100 value: 73.953 - type: ndcg_at_1000 value: 74.312 - type: ndcg_at_3 value: 68.062 - type: ndcg_at_5 value: 70.56400000000001 - type: precision_at_1 value: 58.221000000000004 - type: precision_at_10 value: 9.521 - type: precision_at_100 value: 1.045 - type: precision_at_1000 value: 0.109 - type: precision_at_3 value: 27.348 - type: precision_at_5 value: 17.794999999999998 - type: recall_at_1 value: 54.013 - type: recall_at_10 value: 86.957 - type: recall_at_100 value: 93.911 - type: recall_at_1000 value: 96.38 - type: recall_at_3 value: 75.555 - type: recall_at_5 value: 81.671 - task: type: Retrieval dataset: type: fiqa name: MTEB FiQA2018 config: default split: test revision: None metrics: - type: map_at_1 value: 21.254 - type: map_at_10 value: 33.723 - type: map_at_100 value: 35.574 - type: map_at_1000 value: 35.730000000000004 - type: map_at_3 value: 29.473 - 
type: map_at_5 value: 31.543 - type: mrr_at_1 value: 41.358 - type: mrr_at_10 value: 49.498 - type: mrr_at_100 value: 50.275999999999996 - type: mrr_at_1000 value: 50.308 - type: mrr_at_3 value: 47.016000000000005 - type: mrr_at_5 value: 48.336 - type: ndcg_at_1 value: 41.358 - type: ndcg_at_10 value: 41.579 - type: ndcg_at_100 value: 48.455 - type: ndcg_at_1000 value: 51.165000000000006 - type: ndcg_at_3 value: 37.681 - type: ndcg_at_5 value: 38.49 - type: precision_at_1 value: 41.358 - type: precision_at_10 value: 11.543000000000001 - type: precision_at_100 value: 1.87 - type: precision_at_1000 value: 0.23600000000000002 - type: precision_at_3 value: 24.743000000000002 - type: precision_at_5 value: 17.994 - type: recall_at_1 value: 21.254 - type: recall_at_10 value: 48.698 - type: recall_at_100 value: 74.588 - type: recall_at_1000 value: 91.00200000000001 - type: recall_at_3 value: 33.939 - type: recall_at_5 value: 39.367000000000004 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 35.922 - type: map_at_10 value: 52.32599999999999 - type: map_at_100 value: 53.18000000000001 - type: map_at_1000 value: 53.245 - type: map_at_3 value: 49.294 - type: map_at_5 value: 51.202999999999996 - type: mrr_at_1 value: 71.843 - type: mrr_at_10 value: 78.24600000000001 - type: mrr_at_100 value: 78.515 - type: mrr_at_1000 value: 78.527 - type: mrr_at_3 value: 77.17500000000001 - type: mrr_at_5 value: 77.852 - type: ndcg_at_1 value: 71.843 - type: ndcg_at_10 value: 61.379 - type: ndcg_at_100 value: 64.535 - type: ndcg_at_1000 value: 65.888 - type: ndcg_at_3 value: 56.958 - type: ndcg_at_5 value: 59.434 - type: precision_at_1 value: 71.843 - type: precision_at_10 value: 12.686 - type: precision_at_100 value: 1.517 - type: precision_at_1000 value: 0.16999999999999998 - type: precision_at_3 value: 35.778 - type: precision_at_5 value: 23.422 - type: recall_at_1 value: 35.922 - type: recall_at_10 value: 63.43 - type: recall_at_100 value: 75.868 - type: recall_at_1000 value: 84.88900000000001 - type: recall_at_3 value: 53.666000000000004 - type: recall_at_5 value: 58.555 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 79.4408 - type: ap value: 73.52820871620366 - type: f1 value: 79.36240238685001 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 21.826999999999998 - type: map_at_10 value: 34.04 - type: map_at_100 value: 35.226 - type: map_at_1000 value: 35.275 - type: map_at_3 value: 30.165999999999997 - type: map_at_5 value: 32.318000000000005 - type: mrr_at_1 value: 22.464000000000002 - type: mrr_at_10 value: 34.631 - type: mrr_at_100 value: 35.752 - type: mrr_at_1000 value: 35.795 - type: mrr_at_3 value: 30.798 - type: mrr_at_5 value: 32.946999999999996 - type: ndcg_at_1 value: 22.464000000000002 - type: ndcg_at_10 value: 40.919 - type: ndcg_at_100 value: 46.632 - type: ndcg_at_1000 value: 47.833 - type: ndcg_at_3 value: 32.992 - type: ndcg_at_5 value: 36.834 - type: precision_at_1 value: 22.464000000000002 - type: precision_at_10 value: 6.494 - type: precision_at_100 value: 0.9369999999999999 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.021 - type: precision_at_5 value: 10.347000000000001 - type: recall_at_1 value: 21.826999999999998 - type: recall_at_10 
value: 62.132 - type: recall_at_100 value: 88.55199999999999 - type: recall_at_1000 value: 97.707 - type: recall_at_3 value: 40.541 - type: recall_at_5 value: 49.739 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 95.68399452804377 - type: f1 value: 95.25490609832268 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 83.15321477428182 - type: f1 value: 60.35476439087966 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.92669804976462 - type: f1 value: 69.22815107207565 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.4855413584398 - type: f1 value: 72.92107516103387 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 32.412679360205544 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.09211869875204 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.540919056982545 - type: mrr value: 31.529904607063536 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.745 - type: map_at_10 value: 12.013 - type: map_at_100 value: 15.040000000000001 - type: map_at_1000 value: 16.427 - type: map_at_3 value: 8.841000000000001 - type: map_at_5 value: 10.289 - type: mrr_at_1 value: 45.201 - type: mrr_at_10 value: 53.483999999999995 - type: mrr_at_100 value: 54.20700000000001 - type: mrr_at_1000 value: 54.252 - type: mrr_at_3 value: 51.29 - type: mrr_at_5 value: 52.73 - type: ndcg_at_1 value: 43.808 - type: ndcg_at_10 value: 32.445 - type: ndcg_at_100 value: 30.031000000000002 - type: ndcg_at_1000 value: 39.007 - type: ndcg_at_3 value: 37.204 - type: ndcg_at_5 value: 35.07 - type: precision_at_1 value: 45.201 - type: precision_at_10 value: 23.684 - type: precision_at_100 value: 7.600999999999999 - type: precision_at_1000 value: 2.043 - type: precision_at_3 value: 33.953 - type: precision_at_5 value: 29.412 - type: recall_at_1 value: 5.745 - type: recall_at_10 value: 16.168 - type: recall_at_100 value: 30.875999999999998 - type: recall_at_1000 value: 62.686 - type: recall_at_3 value: 9.75 - type: recall_at_5 value: 12.413 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: None metrics: - type: map_at_1 value: 37.828 - type: map_at_10 value: 53.239000000000004 - type: map_at_100 value: 54.035999999999994 - type: map_at_1000 value: 54.067 - type: map_at_3 value: 49.289 - type: map_at_5 value: 51.784 - type: mrr_at_1 value: 42.497 - type: mrr_at_10 value: 
55.916999999999994 - type: mrr_at_100 value: 56.495 - type: mrr_at_1000 value: 56.516999999999996 - type: mrr_at_3 value: 52.800000000000004 - type: mrr_at_5 value: 54.722 - type: ndcg_at_1 value: 42.468 - type: ndcg_at_10 value: 60.437 - type: ndcg_at_100 value: 63.731 - type: ndcg_at_1000 value: 64.41799999999999 - type: ndcg_at_3 value: 53.230999999999995 - type: ndcg_at_5 value: 57.26 - type: precision_at_1 value: 42.468 - type: precision_at_10 value: 9.47 - type: precision_at_100 value: 1.1360000000000001 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 23.724999999999998 - type: precision_at_5 value: 16.593 - type: recall_at_1 value: 37.828 - type: recall_at_10 value: 79.538 - type: recall_at_100 value: 93.646 - type: recall_at_1000 value: 98.72999999999999 - type: recall_at_3 value: 61.134 - type: recall_at_5 value: 70.377 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 70.548 - type: map_at_10 value: 84.466 - type: map_at_100 value: 85.10600000000001 - type: map_at_1000 value: 85.123 - type: map_at_3 value: 81.57600000000001 - type: map_at_5 value: 83.399 - type: mrr_at_1 value: 81.24 - type: mrr_at_10 value: 87.457 - type: mrr_at_100 value: 87.574 - type: mrr_at_1000 value: 87.575 - type: mrr_at_3 value: 86.507 - type: mrr_at_5 value: 87.205 - type: ndcg_at_1 value: 81.25 - type: ndcg_at_10 value: 88.203 - type: ndcg_at_100 value: 89.457 - type: ndcg_at_1000 value: 89.563 - type: ndcg_at_3 value: 85.465 - type: ndcg_at_5 value: 87.007 - type: precision_at_1 value: 81.25 - type: precision_at_10 value: 13.373 - type: precision_at_100 value: 1.5270000000000001 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.417 - type: precision_at_5 value: 24.556 - type: recall_at_1 value: 70.548 - type: recall_at_10 value: 95.208 - type: recall_at_100 value: 99.514 - type: recall_at_1000 value: 99.988 - type: recall_at_3 value: 87.214 - type: recall_at_5 value: 91.696 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 53.04822095496839 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 60.30778476474675 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: None metrics: - type: map_at_1 value: 4.692 - type: map_at_10 value: 11.766 - type: map_at_100 value: 13.904 - type: map_at_1000 value: 14.216999999999999 - type: map_at_3 value: 8.245 - type: map_at_5 value: 9.92 - type: mrr_at_1 value: 23.0 - type: mrr_at_10 value: 33.78 - type: mrr_at_100 value: 34.922 - type: mrr_at_1000 value: 34.973 - type: mrr_at_3 value: 30.2 - type: mrr_at_5 value: 32.565 - type: ndcg_at_1 value: 23.0 - type: ndcg_at_10 value: 19.863 - type: ndcg_at_100 value: 28.141 - type: ndcg_at_1000 value: 33.549 - type: ndcg_at_3 value: 18.434 - type: ndcg_at_5 value: 16.384 - type: precision_at_1 value: 23.0 - type: precision_at_10 value: 10.39 - type: precision_at_100 value: 2.235 - type: precision_at_1000 value: 0.35300000000000004 - type: precision_at_3 value: 17.133000000000003 - type: precision_at_5 value: 14.44 - type: recall_at_1 value: 4.692 - type: recall_at_10 value: 21.025 - type: recall_at_100 value: 45.324999999999996 - 
type: recall_at_1000 value: 71.675 - type: recall_at_3 value: 10.440000000000001 - type: recall_at_5 value: 14.64 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.96178184892842 - type: cos_sim_spearman value: 79.6487740813199 - type: euclidean_pearson value: 82.06661161625023 - type: euclidean_spearman value: 79.64876769031183 - type: manhattan_pearson value: 82.07061164575131 - type: manhattan_spearman value: 79.65197039464537 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.15305604100027 - type: cos_sim_spearman value: 74.27447427941591 - type: euclidean_pearson value: 80.52737337565307 - type: euclidean_spearman value: 74.27416077132192 - type: manhattan_pearson value: 80.53728571140387 - type: manhattan_spearman value: 74.28853605753457 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 83.44386080639279 - type: cos_sim_spearman value: 84.17947648159536 - type: euclidean_pearson value: 83.34145388129387 - type: euclidean_spearman value: 84.17947648159536 - type: manhattan_pearson value: 83.30699061927966 - type: manhattan_spearman value: 84.18125737380451 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 81.57392220985612 - type: cos_sim_spearman value: 78.80745014464101 - type: euclidean_pearson value: 80.01660371487199 - type: euclidean_spearman value: 78.80741240102256 - type: manhattan_pearson value: 79.96810779507953 - type: manhattan_spearman value: 78.75600400119448 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 86.85421063026625 - type: cos_sim_spearman value: 87.55320285299192 - type: euclidean_pearson value: 86.69750143323517 - type: euclidean_spearman value: 87.55320284326378 - type: manhattan_pearson value: 86.63379169960379 - type: manhattan_spearman value: 87.4815029877984 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 84.31314130411842 - type: cos_sim_spearman value: 85.3489588181433 - type: euclidean_pearson value: 84.13240933463535 - type: euclidean_spearman value: 85.34902871403281 - type: manhattan_pearson value: 84.01183086503559 - type: manhattan_spearman value: 85.19316703166102 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 89.09979781689536 - type: cos_sim_spearman value: 88.87813323759015 - type: euclidean_pearson value: 88.65413031123792 - type: euclidean_spearman value: 88.87813323759015 - type: manhattan_pearson value: 88.61818758256024 - type: manhattan_spearman value: 88.81044100494604 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 62.30693258111531 - 
type: cos_sim_spearman value: 62.195516523251946 - type: euclidean_pearson value: 62.951283701049476 - type: euclidean_spearman value: 62.195516523251946 - type: manhattan_pearson value: 63.068322281439535 - type: manhattan_spearman value: 62.10621171028406 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.27092833763909 - type: cos_sim_spearman value: 84.84429717949759 - type: euclidean_pearson value: 84.8516966060792 - type: euclidean_spearman value: 84.84429717949759 - type: manhattan_pearson value: 84.82203139242881 - type: manhattan_spearman value: 84.8358503952945 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 83.10290863981409 - type: mrr value: 95.31168450286097 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: None metrics: - type: map_at_1 value: 52.161 - type: map_at_10 value: 62.138000000000005 - type: map_at_100 value: 62.769 - type: map_at_1000 value: 62.812 - type: map_at_3 value: 59.111000000000004 - type: map_at_5 value: 60.995999999999995 - type: mrr_at_1 value: 55.333 - type: mrr_at_10 value: 63.504000000000005 - type: mrr_at_100 value: 64.036 - type: mrr_at_1000 value: 64.08 - type: mrr_at_3 value: 61.278 - type: mrr_at_5 value: 62.778 - type: ndcg_at_1 value: 55.333 - type: ndcg_at_10 value: 66.678 - type: ndcg_at_100 value: 69.415 - type: ndcg_at_1000 value: 70.453 - type: ndcg_at_3 value: 61.755 - type: ndcg_at_5 value: 64.546 - type: precision_at_1 value: 55.333 - type: precision_at_10 value: 9.033 - type: precision_at_100 value: 1.043 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 24.221999999999998 - type: precision_at_5 value: 16.333000000000002 - type: recall_at_1 value: 52.161 - type: recall_at_10 value: 79.156 - type: recall_at_100 value: 91.333 - type: recall_at_1000 value: 99.333 - type: recall_at_3 value: 66.43299999999999 - type: recall_at_5 value: 73.272 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.81287128712871 - type: cos_sim_ap value: 95.30034785910676 - type: cos_sim_f1 value: 90.28629856850716 - type: cos_sim_precision value: 92.36401673640168 - type: cos_sim_recall value: 88.3 - type: dot_accuracy value: 99.81287128712871 - type: dot_ap value: 95.30034785910676 - type: dot_f1 value: 90.28629856850716 - type: dot_precision value: 92.36401673640168 - type: dot_recall value: 88.3 - type: euclidean_accuracy value: 99.81287128712871 - type: euclidean_ap value: 95.30034785910676 - type: euclidean_f1 value: 90.28629856850716 - type: euclidean_precision value: 92.36401673640168 - type: euclidean_recall value: 88.3 - type: manhattan_accuracy value: 99.80990099009901 - type: manhattan_ap value: 95.26880751950654 - type: manhattan_f1 value: 90.22177419354838 - type: manhattan_precision value: 90.95528455284553 - type: manhattan_recall value: 89.5 - type: max_accuracy value: 99.81287128712871 - type: max_ap value: 95.30034785910676 - type: max_f1 value: 90.28629856850716 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering 
config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 58.518662504351184 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 34.96168178378587 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 52.04862593471896 - type: mrr value: 52.97238402936932 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.092545236479946 - type: cos_sim_spearman value: 31.599851000175498 - type: dot_pearson value: 30.092542723901676 - type: dot_spearman value: 31.599851000175498 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: None metrics: - type: map_at_1 value: 0.189 - type: map_at_10 value: 1.662 - type: map_at_100 value: 9.384 - type: map_at_1000 value: 22.669 - type: map_at_3 value: 0.5559999999999999 - type: map_at_5 value: 0.9039999999999999 - type: mrr_at_1 value: 68.0 - type: mrr_at_10 value: 81.01899999999999 - type: mrr_at_100 value: 81.01899999999999 - type: mrr_at_1000 value: 81.01899999999999 - type: mrr_at_3 value: 79.333 - type: mrr_at_5 value: 80.733 - type: ndcg_at_1 value: 63.0 - type: ndcg_at_10 value: 65.913 - type: ndcg_at_100 value: 51.895 - type: ndcg_at_1000 value: 46.967 - type: ndcg_at_3 value: 65.49199999999999 - type: ndcg_at_5 value: 66.69699999999999 - type: precision_at_1 value: 68.0 - type: precision_at_10 value: 71.6 - type: precision_at_100 value: 53.66 - type: precision_at_1000 value: 21.124000000000002 - type: precision_at_3 value: 72.667 - type: precision_at_5 value: 74.0 - type: recall_at_1 value: 0.189 - type: recall_at_10 value: 1.913 - type: recall_at_100 value: 12.601999999999999 - type: recall_at_1000 value: 44.296 - type: recall_at_3 value: 0.605 - type: recall_at_5 value: 1.018 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.701 - type: map_at_10 value: 10.445 - type: map_at_100 value: 17.324 - type: map_at_1000 value: 19.161 - type: map_at_3 value: 5.497 - type: map_at_5 value: 7.278 - type: mrr_at_1 value: 30.612000000000002 - type: mrr_at_10 value: 45.534 - type: mrr_at_100 value: 45.792 - type: mrr_at_1000 value: 45.806999999999995 - type: mrr_at_3 value: 37.755 - type: mrr_at_5 value: 43.469 - type: ndcg_at_1 value: 26.531 - type: ndcg_at_10 value: 26.235000000000003 - type: ndcg_at_100 value: 39.17 - type: ndcg_at_1000 value: 51.038 - type: ndcg_at_3 value: 23.625 - type: ndcg_at_5 value: 24.338 - type: precision_at_1 value: 30.612000000000002 - type: precision_at_10 value: 24.285999999999998 - type: precision_at_100 value: 8.224 - type: precision_at_1000 value: 1.6179999999999999 - type: precision_at_3 value: 24.490000000000002 - type: precision_at_5 value: 24.898 - type: recall_at_1 value: 2.701 - type: recall_at_10 value: 17.997 - type: recall_at_100 value: 51.766999999999996 - type: recall_at_1000 value: 87.863 - type: recall_at_3 value: 6.295000000000001 - type: recall_at_5 value: 9.993 - task: type: Classification dataset: type: 
mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 73.3474 - type: ap value: 15.393431414459924 - type: f1 value: 56.466681887882416 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 62.062818336163 - type: f1 value: 62.11230840463252 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 42.464892820845115 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.15962329379508 - type: cos_sim_ap value: 74.73674057919256 - type: cos_sim_f1 value: 68.81245642574947 - type: cos_sim_precision value: 61.48255813953488 - type: cos_sim_recall value: 78.12664907651715 - type: dot_accuracy value: 86.15962329379508 - type: dot_ap value: 74.7367634988281 - type: dot_f1 value: 68.81245642574947 - type: dot_precision value: 61.48255813953488 - type: dot_recall value: 78.12664907651715 - type: euclidean_accuracy value: 86.15962329379508 - type: euclidean_ap value: 74.7367761466634 - type: euclidean_f1 value: 68.81245642574947 - type: euclidean_precision value: 61.48255813953488 - type: euclidean_recall value: 78.12664907651715 - type: manhattan_accuracy value: 86.21326816474935 - type: manhattan_ap value: 74.64416473733951 - type: manhattan_f1 value: 68.80924855491331 - type: manhattan_precision value: 61.23456790123457 - type: manhattan_recall value: 78.52242744063325 - type: max_accuracy value: 86.21326816474935 - type: max_ap value: 74.7367761466634 - type: max_f1 value: 68.81245642574947 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.97620988085536 - type: cos_sim_ap value: 86.08680845745758 - type: cos_sim_f1 value: 78.02793637114438 - type: cos_sim_precision value: 73.11082699683736 - type: cos_sim_recall value: 83.65414228518632 - type: dot_accuracy value: 88.97620988085536 - type: dot_ap value: 86.08681149437946 - type: dot_f1 value: 78.02793637114438 - type: dot_precision value: 73.11082699683736 - type: dot_recall value: 83.65414228518632 - type: euclidean_accuracy value: 88.97620988085536 - type: euclidean_ap value: 86.08681215460771 - type: euclidean_f1 value: 78.02793637114438 - type: euclidean_precision value: 73.11082699683736 - type: euclidean_recall value: 83.65414228518632 - type: manhattan_accuracy value: 88.88888888888889 - type: manhattan_ap value: 86.02916327562438 - type: manhattan_f1 value: 78.02063045516843 - type: manhattan_precision value: 73.38851947346994 - type: manhattan_recall value: 83.2768709578072 - type: max_accuracy value: 88.97620988085536 - type: max_ap value: 86.08681215460771 - type: max_f1 value: 78.02793637114438 --- <!-- TODO: add evaluation results here --> <br><br> <p align="center"> <img 
src="https://aeiljuispo.cloudimg.io/v7/https://cdn-uploads.huggingface.co/production/uploads/603763514de52ff951d89793/AFoybzd5lpBQXEBrQHuTt.png?w=200&h=200&f=face" alt="Finetuner logo: Finetuner helps you to create experiments in order to improve embeddings on search tasks. It accompanies you to deliver the last mile of performance-tuning for neural search applications." width="150px"> </p> <p align="center"> <b>The text embedding set trained by <a href="https://jina.ai/"><b>Jina AI</b></a>.</b> </p> ## Quick Start The easiest way to starting using `jina-embeddings-v2-base-en` is to use Jina AI's [Embedding API](https://jina.ai/embeddings/). ## Intended Usage & Model Info `jina-embeddings-v2-base-en` is an English, monolingual **embedding model** supporting **8192 sequence length**. It is based on a BERT architecture (JinaBERT) that supports the symmetric bidirectional variant of [ALiBi](https://arxiv.org/abs/2108.12409) to allow longer sequence length. The backbone `jina-bert-v2-base-en` is pretrained on the C4 dataset. The model is further trained on Jina AI's collection of more than 400 millions of sentence pairs and hard negatives. These pairs were obtained from various domains and were carefully selected through a thorough cleaning process. The embedding model was trained using 512 sequence length, but extrapolates to 8k sequence length (or even longer) thanks to ALiBi. This makes our model useful for a range of use cases, especially when processing long documents is needed, including long document retrieval, semantic textual similarity, text reranking, recommendation, RAG and LLM-based generative search, etc. With a standard size of 137 million parameters, the model enables fast inference while delivering better performance than our small model. It is recommended to use a single GPU for inference. Additionally, we provide the following embedding models: - [`jina-embeddings-v2-small-en`](https://huggingface.co/jinaai/jina-embeddings-v2-small-en): 33 million parameters. - [`jina-embeddings-v2-base-en`](https://huggingface.co/jinaai/jina-embeddings-v2-base-en): 137 million parameters **(you are here)**. - [`jina-embeddings-v2-base-zh`](https://huggingface.co/jinaai/jina-embeddings-v2-base-zh): Chinese-English Bilingual embeddings. - [`jina-embeddings-v2-base-de`](https://huggingface.co/jinaai/jina-embeddings-v2-base-de): German-English Bilingual embeddings. - [`jina-embeddings-v2-base-es`](https://huggingface.co/jinaai/jina-embeddings-v2-base-es): Spanish-English Bilingual embeddings. ## Data & Parameters Jina Embeddings V2 [technical report](https://arxiv.org/abs/2310.19923) ## Usage **<details><summary>Please apply mean pooling when integrating the model.</summary>** <p> ### Why mean pooling? `mean poooling` takes all token embeddings from model output and averaging them at sentence/paragraph level. It has been proved to be the most effective way to produce high-quality sentence embeddings. We offer an `encode` function to deal with this. 
However, if you would like to do it without using the default `encode` function:

```python
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

def mean_pooling(model_output, attention_mask):
    # Average the token embeddings, ignoring padding positions via the attention mask.
    token_embeddings = model_output[0]  # last hidden state
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

sentences = ['How is the weather today?', 'What is the current weather like today?']

tokenizer = AutoTokenizer.from_pretrained('jinaai/jina-embeddings-v2-small-en')
model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-small-en', trust_remote_code=True)

encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

with torch.no_grad():
    model_output = model(**encoded_input)

embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
embeddings = F.normalize(embeddings, p=2, dim=1)
```

</p>
</details>

You can use Jina embedding models directly from the transformers package:

```python
!pip install transformers
from transformers import AutoModel
from numpy.linalg import norm

cos_sim = lambda a, b: (a @ b.T) / (norm(a) * norm(b))
model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en', trust_remote_code=True)  # trust_remote_code is needed to use the encode method
embeddings = model.encode(['How is the weather today?', 'What is the current weather like today?'])
print(cos_sim(embeddings[0], embeddings[1]))
```

If you only want to handle shorter sequences, such as 2k, pass the `max_length` parameter to the `encode` function:

```python
embeddings = model.encode(
    ['Very long ... document'],
    max_length=2048
)
```

As of its latest release (v2.3.0), sentence-transformers also supports Jina embeddings (please make sure that you are logged into Hugging Face as well):

```python
!pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

model = SentenceTransformer(
    "jinaai/jina-embeddings-v2-base-en",  # switch to en/zh for English or Chinese
    trust_remote_code=True
)

# control your input sequence length up to 8192
model.max_seq_length = 1024

embeddings = model.encode([
    'How is the weather today?',
    'What is the current weather like today?'
])
print(cos_sim(embeddings[0], embeddings[1]))
```

## Alternatives to Using the Transformers (or SentenceTransformers) Package

1. _Managed SaaS_: Get started with a free key on Jina AI's [Embedding API](https://jina.ai/embeddings/).
2. _Private and high-performance deployment_: Get started by picking from our suite of models and deploy them on [AWS Sagemaker](https://aws.amazon.com/marketplace/seller-profile?id=seller-stch2ludm6vgy).

## Use Jina Embeddings for RAG

According to the latest blog post from [LlamaIndex](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83),

> In summary, to achieve the peak performance in both hit rate and MRR, the combination of OpenAI or JinaAI-Base embeddings with the CohereRerank/bge-reranker-large reranker stands out.

<img src="https://miro.medium.com/v2/resize:fit:4800/format:webp/1*ZP2RVejCZovF3FDCg-Bx3A.png" width="780px">

## Plans

1. Bilingual embedding models supporting more European and Asian languages, including Spanish, French, Italian and Japanese.
2. Multimodal embedding models to enable multimodal RAG applications.
3. High-performance rerankers.
## Troubleshooting

**Loading of Model Code failed**

If you forgot to pass the `trust_remote_code=True` flag when calling `AutoModel.from_pretrained` or when initializing the model via the `SentenceTransformer` class, you will receive an error that the model weights could not be initialized. This is caused by transformers falling back to creating a default BERT model, instead of a jina-embedding model:

```bash
Some weights of the model checkpoint at jinaai/jina-embeddings-v2-base-en were not used when initializing BertModel: ['encoder.layer.2.mlp.layernorm.weight', 'encoder.layer.3.mlp.layernorm.weight', 'encoder.layer.10.mlp.wo.bias', 'encoder.layer.5.mlp.wo.bias', 'encoder.layer.2.mlp.layernorm.bias', 'encoder.layer.1.mlp.gated_layers.weight', 'encoder.layer.5.mlp.gated_layers.weight', 'encoder.layer.8.mlp.layernorm.bias', ...
```

**User is not logged into Huggingface**

The model is only available under [gated access](https://huggingface.co/docs/hub/models-gated). This means you need to be logged into Hugging Face to load it. If you receive the following error, you need to provide an access token, either by using the huggingface-cli or by providing the token via an environment variable as described above:

```bash
OSError: jinaai/jina-embeddings-v2-base-en is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'
If this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass `use_auth_token=True`.
```

## Contact

Join our [Discord community](https://discord.jina.ai) and chat with other community members about ideas.

## Citation

If you find Jina Embeddings useful in your research, please cite the following paper:

```
@misc{günther2023jina,
      title={Jina Embeddings 2: 8192-Token General-Purpose Text Embeddings for Long Documents},
      author={Michael Günther and Jackmin Ong and Isabelle Mohr and Alaeddine Abdessalem and Tanguy Abel and Mohammad Kalim Akram and Susana Guzman and Georgios Mastrapas and Saba Sturua and Bo Wang and Maximilian Werk and Nan Wang and Han Xiao},
      year={2023},
      eprint={2310.19923},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
Helsinki-NLP/opus-mt-vi-en
Helsinki-NLP
"2023-08-16T12:08:32Z"
86,582
10
transformers
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "vi", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
"2022-03-02T23:29:04Z"
---
language:
- vi
- en
tags:
- translation
license: apache-2.0
---

### vie-eng

* source group: Vietnamese
* target group: English
* OPUS readme: [vie-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/vie-eng/README.md)
* model: transformer-align
* source language(s): vie vie_Hani
* target language(s): eng
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus-2020-06-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/vie-eng/opus-2020-06-17.zip)
* test set translations: [opus-2020-06-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/vie-eng/opus-2020-06-17.test.txt)
* test set scores: [opus-2020-06-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/vie-eng/opus-2020-06-17.eval.txt)

## Benchmarks

| testset              | BLEU | chr-F |
|----------------------|------|-------|
| Tatoeba-test.vie.eng | 42.8 | 0.608 |

### System Info:
- hf_name: vie-eng
- source_languages: vie
- target_languages: eng
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/vie-eng/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['vi', 'en']
- src_constituents: {'vie', 'vie_Hani'}
- tgt_constituents: {'eng'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/vie-eng/opus-2020-06-17.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/vie-eng/opus-2020-06-17.test.txt
- src_alpha3: vie
- tgt_alpha3: eng
- short_pair: vi-en
- chrF2_score: 0.608
- bleu: 42.8
- brevity_penalty: 0.955
- ref_len: 20241.0
- src_name: Vietnamese
- tgt_name: English
- train_date: 2020-06-17
- src_alpha2: vi
- tgt_alpha2: en
- prefer_old: False
- long_pair: vie-eng
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
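The card above ships no usage snippet, so here is a minimal, illustrative sketch (not part of the original card) using the standard `transformers` translation pipeline; the Vietnamese sample sentence is a made-up stand-in:

```python
# Illustrative sketch only (not from the original card): Vietnamese-to-English
# translation via the standard transformers pipeline.
# Requires the transformers and sentencepiece packages.
from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-vi-en")
result = translator("Tôi thích dịch máy.")  # hypothetical input: "I like machine translation."
print(result[0]["translation_text"])
```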
CAMeL-Lab/bert-base-arabic-camelbert-mix-ner
CAMeL-Lab
"2021-10-17T11:13:00Z"
86,475
8
transformers
[ "transformers", "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
"2022-03-02T23:29:04Z"
---
language:
- ar
license: apache-2.0
widget:
- text: "إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع"
---

# CAMeLBERT-Mix NER Model

## Model description

**CAMeLBERT-Mix NER Model** is a Named Entity Recognition (NER) model that was built by fine-tuning the [CAMeLBERT Mix](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix/) model. For the fine-tuning, we used the [ANERcorp](https://camel.abudhabi.nyu.edu/anercorp/) dataset. Our fine-tuning procedure and the hyperparameters we used can be found in our paper *"[The Interplay of Variant, Size, and Task Type in Arabic Pre-trained Language Models](https://arxiv.org/abs/2103.06678)"*. Our fine-tuning code can be found [here](https://github.com/CAMeL-Lab/CAMeLBERT).

## Intended uses

You can use the CAMeLBERT-Mix NER model directly as part of our [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component (*recommended*) or as part of the transformers pipeline.

#### How to use

To use the model with the [CAMeL Tools](https://github.com/CAMeL-Lab/camel_tools) NER component:

```python
>>> from camel_tools.ner import NERecognizer
>>> from camel_tools.tokenizers.word import simple_word_tokenize
>>> ner = NERecognizer('CAMeL-Lab/bert-base-arabic-camelbert-mix-ner')
>>> sentence = simple_word_tokenize('إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع')
>>> ner.predict_sentence(sentence)
['O', 'B-LOC', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', 'I-LOC', 'O']
```

You can also use the NER model directly with a transformers pipeline:

```python
>>> from transformers import pipeline
>>> ner = pipeline('ner', model='CAMeL-Lab/bert-base-arabic-camelbert-mix-ner')
>>> ner("إمارة أبوظبي هي إحدى إمارات دولة الإمارات العربية المتحدة السبع")
[{'word': 'أبوظبي', 'score': 0.9895730018615723, 'entity': 'B-LOC', 'index': 2, 'start': 6, 'end': 12},
 {'word': 'الإمارات', 'score': 0.8156259655952454, 'entity': 'B-LOC', 'index': 8, 'start': 33, 'end': 41},
 {'word': 'العربية', 'score': 0.890906810760498, 'entity': 'I-LOC', 'index': 9, 'start': 42, 'end': 49},
 {'word': 'المتحدة', 'score': 0.8169114589691162, 'entity': 'I-LOC', 'index': 10, 'start': 50, 'end': 57}]
```

*Note*: to download our models, you need `transformers>=3.5.0`. Otherwise, you can download the models manually.

## Citation

```bibtex
@inproceedings{inoue-etal-2021-interplay,
    title = "The Interplay of Variant, Size, and Task Type in {A}rabic Pre-trained Language Models",
    author = "Inoue, Go  and
      Alhafni, Bashar  and
      Baimukan, Nurpeiis  and
      Bouamor, Houda  and
      Habash, Nizar",
    booktitle = "Proceedings of the Sixth Arabic Natural Language Processing Workshop",
    month = apr,
    year = "2021",
    address = "Kyiv, Ukraine (Online)",
    publisher = "Association for Computational Linguistics",
    abstract = "In this paper, we explore the effects of language variants, data sizes, and fine-tuning task types in Arabic pre-trained language models. To do so, we build three pre-trained language models across three variants of Arabic: Modern Standard Arabic (MSA), dialectal Arabic, and classical Arabic, in addition to a fourth language model which is pre-trained on a mix of the three. We also examine the importance of pre-training data size by building additional models that are pre-trained on a scaled-down set of the MSA variant. We compare our different models to each other, as well as to eight publicly available models by fine-tuning them on five NLP tasks spanning 12 datasets. Our results suggest that the variant proximity of pre-training data to fine-tuning data is more important than the pre-training data size. We exploit this insight in defining an optimized system selection model for the studied tasks.",
}
```
SpanBERT/spanbert-large-cased
SpanBERT
"2021-05-19T11:31:33Z"
86,353
12
transformers
[ "transformers", "pytorch", "jax", "bert", "endpoints_compatible", "region:us" ]
null
"2022-03-02T23:29:05Z"
Entry not found
microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract
microsoft
"2023-11-06T18:04:15Z"
86,152
65
transformers
[ "transformers", "pytorch", "jax", "bert", "fill-mask", "exbert", "en", "arxiv:2007.15779", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: en
tags:
- exbert
license: mit
widget:
- text: "[MASK] is a tyrosine kinase inhibitor."
---

## MSR BiomedBERT (abstracts only)

<div style="border: 2px solid orange; border-radius:10px; padding:0px 10px; width: fit-content;">

* This model was previously named **"PubMedBERT (abstracts)"**.
* You can either adopt the new model name "microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract" or update your `transformers` library to version 4.22+ if you need to refer to the old name.

</div>

Pretraining large neural language models, such as BERT, has led to impressive gains on many natural language processing (NLP) tasks. However, most pretraining efforts focus on general-domain corpora, such as newswire and the Web. A prevailing assumption is that even domain-specific pretraining can benefit by starting from general-domain language models. [Recent work](https://arxiv.org/abs/2007.15779) shows that for domains with abundant unlabeled text, such as biomedicine, pretraining language models from scratch results in substantial gains over continual pretraining of general-domain language models.

This BiomedBERT is pretrained from scratch using _abstracts_ from [PubMed](https://pubmed.ncbi.nlm.nih.gov/). This model achieves state-of-the-art performance on several biomedical NLP tasks, as shown on the [Biomedical Language Understanding and Reasoning Benchmark](https://aka.ms/BLURB).

## Citation

If you find BiomedBERT useful in your research, please cite the following paper:

```latex
@misc{pubmedbert,
  author = {Yu Gu and Robert Tinn and Hao Cheng and Michael Lucas and Naoto Usuyama and Xiaodong Liu and Tristan Naumann and Jianfeng Gao and Hoifung Poon},
  title = {Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing},
  year = {2020},
  eprint = {arXiv:2007.15779},
}
```

<a href="https://huggingface.co/exbert/?model=microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract&modelKind=bidirectional&sentence=Gefitinib%20is%20an%20EGFR%20tyrosine%20kinase%20inhibitor,%20which%20is%20often%20used%20for%20breast%20cancer%20and%20NSCLC%20treatment.&layer=10&heads=..0,1,2,3,4,5,6,7,8,9,10,11&threshold=0.7&tokenInd=17&tokenSide=right&maskInds=..&hideClsSep=true">
	<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
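As an illustrative sketch (not part of the original card), the widget sentence above can be run through the standard `transformers` fill-mask pipeline:

```python
# Illustrative sketch only (not from the original card): score mask fills
# for the card's widget sentence with the transformers fill-mask pipeline.
from transformers import pipeline

fill_mask = pipeline(
    "fill-mask",
    model="microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract",
)
for prediction in fill_mask("[MASK] is a tyrosine kinase inhibitor."):
    print(prediction["token_str"], round(prediction["score"], 4))
```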
mixedbread-ai/mxbai-rerank-xsmall-v1
mixedbread-ai
"2024-07-22T14:31:51Z"
86,098
28
transformers
[ "transformers", "onnx", "safetensors", "deberta-v2", "text-classification", "reranker", "transformers.js", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2024-02-29T10:31:57Z"
--- library_name: transformers tags: - reranker - transformers.js license: apache-2.0 language: - en --- <br><br> <p align="center"> <svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" viewBox="0 0 2020 1130" width="150" height="150" aria-hidden="true"><path fill="#e95a0f" d="M398.167 621.992c-1.387-20.362-4.092-40.739-3.851-61.081.355-30.085 6.873-59.139 21.253-85.976 10.487-19.573 24.09-36.822 40.662-51.515 16.394-14.535 34.338-27.046 54.336-36.182 15.224-6.955 31.006-12.609 47.829-14.168 11.809-1.094 23.753-2.514 35.524-1.836 23.033 1.327 45.131 7.255 66.255 16.75 16.24 7.3 31.497 16.165 45.651 26.969 12.997 9.921 24.412 21.37 34.158 34.509 11.733 15.817 20.849 33.037 25.987 52.018 3.468 12.81 6.438 25.928 7.779 39.097 1.722 16.908 1.642 34.003 2.235 51.021.427 12.253.224 24.547 1.117 36.762 1.677 22.93 4.062 45.764 11.8 67.7 5.376 15.239 12.499 29.55 20.846 43.681l-18.282 20.328c-1.536 1.71-2.795 3.665-4.254 5.448l-19.323 23.533c-13.859-5.449-27.446-11.803-41.657-16.086-13.622-4.106-27.793-6.765-41.905-8.775-15.256-2.173-30.701-3.475-46.105-4.049-23.571-.879-47.178-1.056-70.769-1.029-10.858.013-21.723 1.116-32.57 1.926-5.362.4-10.69 1.255-16.464 1.477-2.758-7.675-5.284-14.865-7.367-22.181-3.108-10.92-4.325-22.554-13.16-31.095-2.598-2.512-5.069-5.341-6.883-8.443-6.366-10.884-12.48-21.917-18.571-32.959-4.178-7.573-8.411-14.375-17.016-18.559-10.34-5.028-19.538-12.387-29.311-18.611-3.173-2.021-6.414-4.312-9.952-5.297-5.857-1.63-11.98-2.301-17.991-3.376z"></path><path fill="#ed6d7b" d="M1478.998 758.842c-12.025.042-24.05.085-36.537-.373-.14-8.536.231-16.569.453-24.607.033-1.179-.315-2.986-1.081-3.4-.805-.434-2.376.338-3.518.81-.856.354-1.562 1.069-3.589 2.521-.239-3.308-.664-5.586-.519-7.827.488-7.544 2.212-15.166 1.554-22.589-1.016-11.451 1.397-14.592-12.332-14.419-3.793.048-3.617-2.803-3.332-5.331.499-4.422 1.45-8.803 1.77-13.233.311-4.316.068-8.672.068-12.861-2.554-.464-4.326-.86-6.12-1.098-4.415-.586-6.051-2.251-5.065-7.31 1.224-6.279.848-12.862 1.276-19.306.19-2.86-.971-4.473-3.794-4.753-4.113-.407-8.242-1.057-12.352-.975-4.663.093-5.192-2.272-4.751-6.012.733-6.229 1.252-12.483 1.875-18.726l1.102-10.495c-5.905-.309-11.146-.805-16.385-.778-3.32.017-5.174-1.4-5.566-4.4-1.172-8.968-2.479-17.944-3.001-26.96-.26-4.484-1.936-5.705-6.005-5.774-9.284-.158-18.563-.594-27.843-.953-7.241-.28-10.137-2.764-11.3-9.899-.746-4.576-2.715-7.801-7.777-8.207-7.739-.621-15.511-.992-23.207-1.961-7.327-.923-14.587-2.415-21.853-3.777-5.021-.941-10.003-2.086-15.003-3.14 4.515-22.952 13.122-44.382 26.284-63.587 18.054-26.344 41.439-47.239 69.102-63.294 15.847-9.197 32.541-16.277 50.376-20.599 16.655-4.036 33.617-5.715 50.622-4.385 33.334 2.606 63.836 13.955 92.415 31.15 15.864 9.545 30.241 20.86 42.269 34.758 8.113 9.374 15.201 19.78 21.718 30.359 10.772 17.484 16.846 36.922 20.611 56.991 1.783 9.503 2.815 19.214 3.318 28.876.758 14.578.755 29.196.65 44.311l-51.545 20.013c-7.779 3.059-15.847 5.376-21.753 12.365-4.73 5.598-10.658 10.316-16.547 14.774-9.9 7.496-18.437 15.988-25.083 26.631-3.333 5.337-7.901 10.381-12.999 14.038-11.355 8.144-17.397 18.973-19.615 32.423l-6.988 41.011z"></path><path fill="#ec663e" d="M318.11 923.047c-.702 17.693-.832 35.433-2.255 53.068-1.699 21.052-6.293 41.512-14.793 61.072-9.001 20.711-21.692 38.693-38.496 53.583-16.077 14.245-34.602 24.163-55.333 30.438-21.691 6.565-43.814 8.127-66.013 6.532-22.771-1.636-43.88-9.318-62.74-22.705-20.223-14.355-35.542-32.917-48.075-54.096-9.588-16.203-16.104-33.55-19.201-52.015-2.339-13.944-2.307-28.011-.403-42.182 2.627-19.545 
9.021-37.699 17.963-55.067 11.617-22.564 27.317-41.817 48.382-56.118 15.819-10.74 33.452-17.679 52.444-20.455 8.77-1.282 17.696-1.646 26.568-2.055 11.755-.542 23.534-.562 35.289-1.11 8.545-.399 17.067-1.291 26.193-1.675 1.349 1.77 2.24 3.199 2.835 4.742 4.727 12.261 10.575 23.865 18.636 34.358 7.747 10.084 14.83 20.684 22.699 30.666 3.919 4.972 8.37 9.96 13.609 13.352 7.711 4.994 16.238 8.792 24.617 12.668 5.852 2.707 12.037 4.691 18.074 6.998z"></path><path fill="#ea580e" d="M1285.167 162.995c3.796-29.75 13.825-56.841 32.74-80.577 16.339-20.505 36.013-36.502 59.696-47.614 14.666-6.881 29.971-11.669 46.208-12.749 10.068-.669 20.239-1.582 30.255-.863 16.6 1.191 32.646 5.412 47.9 12.273 19.39 8.722 36.44 20.771 50.582 36.655 15.281 17.162 25.313 37.179 31.49 59.286 5.405 19.343 6.31 39.161 4.705 58.825-2.37 29.045-11.836 55.923-30.451 78.885-10.511 12.965-22.483 24.486-37.181 33.649-5.272-5.613-10.008-11.148-14.539-16.846-5.661-7.118-10.958-14.533-16.78-21.513-4.569-5.478-9.548-10.639-14.624-15.658-3.589-3.549-7.411-6.963-11.551-9.827-5.038-3.485-10.565-6.254-15.798-9.468-8.459-5.195-17.011-9.669-26.988-11.898-12.173-2.72-24.838-4.579-35.622-11.834-1.437-.967-3.433-1.192-5.213-1.542-12.871-2.529-25.454-5.639-36.968-12.471-5.21-3.091-11.564-4.195-17.011-6.965-4.808-2.445-8.775-6.605-13.646-8.851-8.859-4.085-18.114-7.311-27.204-10.896z"></path><path fill="#f8ab00" d="M524.963 311.12c-9.461-5.684-19.513-10.592-28.243-17.236-12.877-9.801-24.031-21.578-32.711-35.412-11.272-17.965-19.605-37.147-21.902-58.403-1.291-11.951-2.434-24.073-1.87-36.034.823-17.452 4.909-34.363 11.581-50.703 8.82-21.603 22.25-39.792 39.568-55.065 18.022-15.894 39.162-26.07 62.351-32.332 19.22-5.19 38.842-6.177 58.37-4.674 23.803 1.831 45.56 10.663 65.062 24.496 17.193 12.195 31.688 27.086 42.894 45.622-11.403 8.296-22.633 16.117-34.092 23.586-17.094 11.142-34.262 22.106-48.036 37.528-8.796 9.848-17.201 20.246-27.131 28.837-16.859 14.585-27.745 33.801-41.054 51.019-11.865 15.349-20.663 33.117-30.354 50.08-5.303 9.283-9.654 19.11-14.434 28.692z"></path><path fill="#ea5227" d="M1060.11 1122.049c-7.377 1.649-14.683 4.093-22.147 4.763-11.519 1.033-23.166 1.441-34.723 1.054-19.343-.647-38.002-4.7-55.839-12.65-15.078-6.72-28.606-15.471-40.571-26.836-24.013-22.81-42.053-49.217-49.518-81.936-1.446-6.337-1.958-12.958-2.235-19.477-.591-13.926-.219-27.909-1.237-41.795-.916-12.5-3.16-24.904-4.408-37.805 1.555-1.381 3.134-2.074 3.778-3.27 4.729-8.79 12.141-15.159 19.083-22.03 5.879-5.818 10.688-12.76 16.796-18.293 6.993-6.335 11.86-13.596 14.364-22.612l8.542-29.993c8.015 1.785 15.984 3.821 24.057 5.286 8.145 1.478 16.371 2.59 24.602 3.493 8.453.927 16.956 1.408 25.891 2.609 1.119 16.09 1.569 31.667 2.521 47.214.676 11.045 1.396 22.154 3.234 33.043 2.418 14.329 5.708 28.527 9.075 42.674 3.499 14.705 4.028 29.929 10.415 44.188 10.157 22.674 18.29 46.25 28.281 69.004 7.175 16.341 12.491 32.973 15.078 50.615.645 4.4 3.256 8.511 4.963 12.755z"></path><path fill="#ea5330" d="M1060.512 1122.031c-2.109-4.226-4.72-8.337-5.365-12.737-2.587-17.642-7.904-34.274-15.078-50.615-9.991-22.755-18.124-46.33-28.281-69.004-6.387-14.259-6.916-29.482-10.415-44.188-3.366-14.147-6.656-28.346-9.075-42.674-1.838-10.889-2.558-21.999-3.234-33.043-.951-15.547-1.401-31.124-2.068-47.146 8.568-.18 17.146.487 25.704.286l41.868-1.4c.907 3.746 1.245 7.04 1.881 10.276l8.651 42.704c.903 4.108 2.334 8.422 4.696 11.829 7.165 10.338 14.809 20.351 22.456 30.345 4.218 5.512 8.291 11.304 13.361 15.955 8.641 7.927 18.065 14.995 27.071 22.532 12.011 10.052 24.452 19.302 40.151 
22.854-1.656 11.102-2.391 22.44-5.172 33.253-4.792 18.637-12.38 36.209-23.412 52.216-13.053 18.94-29.086 34.662-49.627 45.055-10.757 5.443-22.443 9.048-34.111 13.501z"></path><path fill="#f8aa05" d="M1989.106 883.951c5.198 8.794 11.46 17.148 15.337 26.491 5.325 12.833 9.744 26.207 12.873 39.737 2.95 12.757 3.224 25.908 1.987 39.219-1.391 14.973-4.643 29.268-10.349 43.034-5.775 13.932-13.477 26.707-23.149 38.405-14.141 17.104-31.215 30.458-50.807 40.488-14.361 7.352-29.574 12.797-45.741 14.594-10.297 1.144-20.732 2.361-31.031 1.894-24.275-1.1-47.248-7.445-68.132-20.263-6.096-3.741-11.925-7.917-17.731-12.342 5.319-5.579 10.361-10.852 15.694-15.811l37.072-34.009c.975-.892 2.113-1.606 3.08-2.505 6.936-6.448 14.765-12.2 20.553-19.556 8.88-11.285 20.064-19.639 31.144-28.292 4.306-3.363 9.06-6.353 12.673-10.358 5.868-6.504 10.832-13.814 16.422-20.582 6.826-8.264 13.727-16.481 20.943-24.401 4.065-4.461 8.995-8.121 13.249-12.424 14.802-14.975 28.77-30.825 45.913-43.317z"></path><path fill="#ed6876" d="M1256.099 523.419c5.065.642 10.047 1.787 15.068 2.728 7.267 1.362 14.526 2.854 21.853 3.777 7.696.97 15.468 1.34 23.207 1.961 5.062.406 7.031 3.631 7.777 8.207 1.163 7.135 4.059 9.62 11.3 9.899l27.843.953c4.069.069 5.745 1.291 6.005 5.774.522 9.016 1.829 17.992 3.001 26.96.392 3 2.246 4.417 5.566 4.4 5.239-.026 10.48.469 16.385.778l-1.102 10.495-1.875 18.726c-.44 3.74.088 6.105 4.751 6.012 4.11-.082 8.239.568 12.352.975 2.823.28 3.984 1.892 3.794 4.753-.428 6.444-.052 13.028-1.276 19.306-.986 5.059.651 6.724 5.065 7.31 1.793.238 3.566.634 6.12 1.098 0 4.189.243 8.545-.068 12.861-.319 4.43-1.27 8.811-1.77 13.233-.285 2.528-.461 5.379 3.332 5.331 13.729-.173 11.316 2.968 12.332 14.419.658 7.423-1.066 15.045-1.554 22.589-.145 2.241.28 4.519.519 7.827 2.026-1.452 2.733-2.167 3.589-2.521 1.142-.472 2.713-1.244 3.518-.81.767.414 1.114 2.221 1.081 3.4l-.917 24.539c-11.215.82-22.45.899-33.636 1.674l-43.952 3.436c-1.086-3.01-2.319-5.571-2.296-8.121.084-9.297-4.468-16.583-9.091-24.116-3.872-6.308-8.764-13.052-9.479-19.987-1.071-10.392-5.716-15.936-14.889-18.979-1.097-.364-2.16-.844-3.214-1.327-7.478-3.428-15.548-5.918-19.059-14.735-.904-2.27-3.657-3.775-5.461-5.723-2.437-2.632-4.615-5.525-7.207-7.987-2.648-2.515-5.352-5.346-8.589-6.777-4.799-2.121-10.074-3.185-15.175-4.596l-15.785-4.155c.274-12.896 1.722-25.901.54-38.662-1.647-17.783-3.457-35.526-2.554-53.352.528-10.426 2.539-20.777 3.948-31.574z"></path><path fill="#f6a200" d="M525.146 311.436c4.597-9.898 8.947-19.725 14.251-29.008 9.691-16.963 18.49-34.73 30.354-50.08 13.309-17.218 24.195-36.434 41.054-51.019 9.93-8.591 18.335-18.989 27.131-28.837 13.774-15.422 30.943-26.386 48.036-37.528 11.459-7.469 22.688-15.29 34.243-23.286 11.705 16.744 19.716 35.424 22.534 55.717 2.231 16.066 2.236 32.441 2.753 49.143-4.756 1.62-9.284 2.234-13.259 4.056-6.43 2.948-12.193 7.513-18.774 9.942-19.863 7.331-33.806 22.349-47.926 36.784-7.86 8.035-13.511 18.275-19.886 27.705-4.434 6.558-9.345 13.037-12.358 20.254-4.249 10.177-6.94 21.004-10.296 31.553-12.33.053-24.741 1.027-36.971-.049-20.259-1.783-40.227-5.567-58.755-14.69-.568-.28-1.295-.235-2.132-.658z"></path><path fill="#f7a80d" d="M1989.057 883.598c-17.093 12.845-31.061 28.695-45.863 43.67-4.254 4.304-9.184 7.963-13.249 12.424-7.216 7.92-14.117 16.137-20.943 24.401-5.59 6.768-10.554 14.078-16.422 20.582-3.614 4.005-8.367 6.995-12.673 10.358-11.08 8.653-22.264 17.007-31.144 28.292-5.788 7.356-13.617 13.108-20.553 19.556-.967.899-2.105 1.614-3.08 2.505l-37.072 34.009c-5.333 4.96-10.375 10.232-15.859 
15.505-21.401-17.218-37.461-38.439-48.623-63.592 3.503-1.781 7.117-2.604 9.823-4.637 8.696-6.536 20.392-8.406 27.297-17.714.933-1.258 2.646-1.973 4.065-2.828 17.878-10.784 36.338-20.728 53.441-32.624 10.304-7.167 18.637-17.23 27.583-26.261 3.819-3.855 7.436-8.091 10.3-12.681 12.283-19.68 24.43-39.446 40.382-56.471 12.224-13.047 17.258-29.524 22.539-45.927 15.85 4.193 29.819 12.129 42.632 22.08 10.583 8.219 19.782 17.883 27.42 29.351z"></path><path fill="#ef7a72" d="M1479.461 758.907c1.872-13.734 4.268-27.394 6.525-41.076 2.218-13.45 8.26-24.279 19.615-32.423 5.099-3.657 9.667-8.701 12.999-14.038 6.646-10.643 15.183-19.135 25.083-26.631 5.888-4.459 11.817-9.176 16.547-14.774 5.906-6.99 13.974-9.306 21.753-12.365l51.48-19.549c.753 11.848.658 23.787 1.641 35.637 1.771 21.353 4.075 42.672 11.748 62.955.17.449.107.985-.019 2.158-6.945 4.134-13.865 7.337-20.437 11.143-3.935 2.279-7.752 5.096-10.869 8.384-6.011 6.343-11.063 13.624-17.286 19.727-9.096 8.92-12.791 20.684-18.181 31.587-.202.409-.072.984-.096 1.481-8.488-1.72-16.937-3.682-25.476-5.094-9.689-1.602-19.426-3.084-29.201-3.949-15.095-1.335-30.241-2.1-45.828-3.172z"></path><path fill="#e94e3b" d="M957.995 766.838c-20.337-5.467-38.791-14.947-55.703-27.254-8.2-5.967-15.451-13.238-22.958-20.37 2.969-3.504 5.564-6.772 8.598-9.563 7.085-6.518 11.283-14.914 15.8-23.153 4.933-8.996 10.345-17.743 14.966-26.892 2.642-5.231 5.547-11.01 5.691-16.611.12-4.651.194-8.932 2.577-12.742 8.52-13.621 15.483-28.026 18.775-43.704 2.11-10.049 7.888-18.774 7.81-29.825-.064-9.089 4.291-18.215 6.73-27.313 3.212-11.983 7.369-23.797 9.492-35.968 3.202-18.358 5.133-36.945 7.346-55.466l4.879-45.8c6.693.288 13.386.575 20.54 1.365.13 3.458-.41 6.407-.496 9.37l-1.136 42.595c-.597 11.552-2.067 23.058-3.084 34.59l-3.845 44.478c-.939 10.202-1.779 20.432-3.283 30.557-.96 6.464-4.46 12.646-1.136 19.383.348.706-.426 1.894-.448 2.864-.224 9.918-5.99 19.428-2.196 29.646.103.279-.033.657-.092.983l-8.446 46.205c-1.231 6.469-2.936 12.846-4.364 19.279-1.5 6.757-2.602 13.621-4.456 20.277-3.601 12.93-10.657 25.3-5.627 39.47.368 1.036.234 2.352.017 3.476l-5.949 30.123z"></path><path fill="#ea5043" d="M958.343 767.017c1.645-10.218 3.659-20.253 5.602-30.302.217-1.124.351-2.44-.017-3.476-5.03-14.17 2.026-26.539 5.627-39.47 1.854-6.656 2.956-13.52 4.456-20.277 1.428-6.433 3.133-12.81 4.364-19.279l8.446-46.205c.059-.326.196-.705.092-.983-3.794-10.218 1.972-19.728 2.196-29.646.022-.97.796-2.158.448-2.864-3.324-6.737.176-12.919 1.136-19.383 1.504-10.125 2.344-20.355 3.283-30.557l3.845-44.478c1.017-11.532 2.488-23.038 3.084-34.59.733-14.18.722-28.397 1.136-42.595.086-2.963.626-5.912.956-9.301 5.356-.48 10.714-.527 16.536-.081 2.224 15.098 1.855 29.734 1.625 44.408-.157 10.064 1.439 20.142 1.768 30.23.334 10.235-.035 20.49.116 30.733.084 5.713.789 11.418.861 17.13.054 4.289-.469 8.585-.702 12.879-.072 1.323-.138 2.659-.031 3.975l2.534 34.405-1.707 36.293-1.908 48.69c-.182 8.103.993 16.237.811 24.34-.271 12.076-1.275 24.133-1.787 36.207-.102 2.414-.101 5.283 1.06 7.219 4.327 7.22 4.463 15.215 4.736 23.103.365 10.553.088 21.128.086 31.693-11.44 2.602-22.84.688-34.106-.916-11.486-1.635-22.806-4.434-34.546-6.903z"></path><path fill="#eb5d19" d="M398.091 622.45c6.086.617 12.21 1.288 18.067 2.918 3.539.985 6.779 3.277 9.952 5.297 9.773 6.224 18.971 13.583 29.311 18.611 8.606 4.184 12.839 10.986 17.016 18.559l18.571 32.959c1.814 3.102 4.285 5.931 6.883 8.443 8.835 8.542 10.052 20.175 13.16 31.095 2.082 7.317 4.609 14.507 6.946 22.127-29.472 3.021-58.969 5.582-87.584 
</svg> </p>

<p align="center"> <b>The crispy rerank family from <a href="https://mixedbread.ai"><b>Mixedbread</b></a>.</b> </p>

# mxbai-rerank-xsmall-v1

This is the smallest model in our family of powerful reranker models. You can learn more about the models in our [blog post](https://www.mixedbread.ai/blog/mxbai-rerank-v1).

We have three models:

- [mxbai-rerank-xsmall-v1](https://huggingface.co/mixedbread-ai/mxbai-rerank-xsmall-v1) (🍞)
- [mxbai-rerank-base-v1](https://huggingface.co/mixedbread-ai/mxbai-rerank-base-v1)
- [mxbai-rerank-large-v1](https://huggingface.co/mixedbread-ai/mxbai-rerank-large-v1)

## Quickstart

Currently, the best way to use our models is with the most recent version of sentence-transformers.

`pip install -U sentence-transformers`

Let's say you have a query, and you want to rerank a set of documents. You can do that with only one line of code:

```python
from sentence_transformers import CrossEncoder

# Load the model (here, the xsmall variant)
model = CrossEncoder("mixedbread-ai/mxbai-rerank-xsmall-v1")

# Example query and documents
query = "Who wrote 'To Kill a Mockingbird'?"

documents = [
    "'To Kill a Mockingbird' is a novel by Harper Lee published in 1960. It was immediately successful, winning the Pulitzer Prize, and has become a classic of modern American literature.",
    "The novel 'Moby-Dick' was written by Herman Melville and first published in 1851. It is considered a masterpiece of American literature and deals with complex themes of obsession, revenge, and the conflict between good and evil.",
    "Harper Lee, an American novelist widely known for her novel 'To Kill a Mockingbird', was born in 1926 in Monroeville, Alabama. She received the Pulitzer Prize for Fiction in 1961.",
    "Jane Austen was an English novelist known primarily for her six major novels, which interpret, critique and comment upon the British landed gentry at the end of the 18th century.",
    "The 'Harry Potter' series, which consists of seven fantasy novels written by British author J.K. Rowling, is among the most popular and critically acclaimed books of the modern era.",
    "'The Great Gatsby', a novel written by American author F. Scott Fitzgerald, was published in 1925. The story is set in the Jazz Age and follows the life of millionaire Jay Gatsby and his pursuit of Daisy Buchanan."
]

# Let's get the scores
results = model.rank(query, documents, return_documents=True, top_k=3)
```
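`rank` returns the top-k entries sorted by relevance; following sentence-transformers' `CrossEncoder.rank` output format, each entry is a dict with `corpus_id`, `score`, and (because `return_documents=True`) the document `text`. A quick way to inspect them:

```python
# Print score and a preview of each top-ranked document.
for r in results:
    print(f"{r['score']:.3f}  {r['text'][:80]}")
```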
<details>
<summary>JavaScript Example</summary>

Install [transformers.js](https://github.com/xenova/transformers.js)

`npm i @xenova/transformers`

Let's say you have a query, and you want to rerank a set of documents. In JavaScript, you need to add a `rank` function yourself:

```javascript
import { AutoTokenizer, AutoModelForSequenceClassification } from '@xenova/transformers';

const model_id = 'mixedbread-ai/mxbai-rerank-xsmall-v1';
const model = await AutoModelForSequenceClassification.from_pretrained(model_id);
const tokenizer = await AutoTokenizer.from_pretrained(model_id);

/**
 * Performs ranking with the CrossEncoder on the given query and documents.
 * Returns a sorted list with the document indices and scores.
 * @param {string} query A single query
 * @param {string[]} documents A list of documents
 * @param {Object} options Options for ranking
 * @param {number} [options.top_k=undefined] Return the top-k documents. If undefined, all documents are returned.
 * @param {number} [options.return_documents=false] If true, also returns the documents. If false, only returns the indices and scores.
 */
async function rank(query, documents, {
    top_k = undefined,
    return_documents = false,
} = {}) {
    const inputs = tokenizer(
        new Array(documents.length).fill(query),
        { text_pair: documents, padding: true, truncation: true }
    );
    const { logits } = await model(inputs);
    return logits
        .sigmoid()
        .tolist()
        .map(([score], i) => ({
            corpus_id: i,
            score,
            ...(return_documents ? { text: documents[i] } : {})
        }))
        .sort((a, b) => b.score - a.score)
        .slice(0, top_k);
}

// Example usage:
const query = "Who wrote 'To Kill a Mockingbird'?"
const documents = [
    "'To Kill a Mockingbird' is a novel by Harper Lee published in 1960. It was immediately successful, winning the Pulitzer Prize, and has become a classic of modern American literature.",
    "The novel 'Moby-Dick' was written by Herman Melville and first published in 1851. It is considered a masterpiece of American literature and deals with complex themes of obsession, revenge, and the conflict between good and evil.",
    "Harper Lee, an American novelist widely known for her novel 'To Kill a Mockingbird', was born in 1926 in Monroeville, Alabama. She received the Pulitzer Prize for Fiction in 1961.",
    "Jane Austen was an English novelist known primarily for her six major novels, which interpret, critique and comment upon the British landed gentry at the end of the 18th century.",
    "The 'Harry Potter' series, which consists of seven fantasy novels written by British author J.K. Rowling, is among the most popular and critically acclaimed books of the modern era.",
    "'The Great Gatsby', a novel written by American author F. Scott Fitzgerald, was published in 1925. The story is set in the Jazz Age and follows the life of millionaire Jay Gatsby and his pursuit of Daisy Buchanan."
]

const results = await rank(query, documents, { return_documents: true, top_k: 3 });
console.log(results);
```

</details>

## Using API

You can use the large model via our API as follows:

```python
from mixedbread_ai.client import MixedbreadAI

mxbai = MixedbreadAI(api_key="{MIXEDBREAD_API_KEY}")

res = mxbai.reranking(
    model="mixedbread-ai/mxbai-rerank-large-v1",
    query="Who is the author of To Kill a Mockingbird?",
    input=[
        "To Kill a Mockingbird is a novel by Harper Lee published in 1960. It was immediately successful, winning the Pulitzer Prize, and has become a classic of modern American literature.",
        "The novel Moby-Dick was written by Herman Melville and first published in 1851. It is considered a masterpiece of American literature and deals with complex themes of obsession, revenge, and the conflict between good and evil.",
        "Harper Lee, an American novelist widely known for her novel To Kill a Mockingbird, was born in 1926 in Monroeville, Alabama. She received the Pulitzer Prize for Fiction in 1961.",
        "Jane Austen was an English novelist known primarily for her six major novels, which interpret, critique and comment upon the British landed gentry at the end of the 18th century.",
        "The Harry Potter series, which consists of seven fantasy novels written by British author J.K. Rowling, is among the most popular and critically acclaimed books of the modern era.",
        "The Great Gatsby, a novel written by American author F. Scott Fitzgerald, was published in 1925. The story is set in the Jazz Age and follows the life of millionaire Jay Gatsby and his pursuit of Daisy Buchanan."
    ],
    top_k=3,
    return_input=False
)

print(res.data)
```
The API comes with additional features, such as a continuously trained reranker! Check out the [docs](https://www.mixedbread.ai/docs) for more information.

## Evaluation

Our reranker models are designed to elevate your search. They work extremely well in combination with keyword search and can even outperform semantic search systems in many cases.

| Model | NDCG@10 | Accuracy@3 |
| ------------------------------------------------------------------------------------- | -------- | ---------- |
| Lexical Search (Lucene) | 38.0 | 66.4 |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 41.6 | 66.9 |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 45.2 | 70.6 |
| cohere-embed-v3 (semantic search) | 47.5 | 70.9 |
| [mxbai-rerank-xsmall-v1](https://huggingface.co/mixedbread-ai/mxbai-rerank-xsmall-v1) | **43.9** | **70.0** |
| [mxbai-rerank-base-v1](https://huggingface.co/mixedbread-ai/mxbai-rerank-base-v1) | **46.9** | **72.3** |
| [mxbai-rerank-large-v1](https://huggingface.co/mixedbread-ai/mxbai-rerank-large-v1) | **48.8** | **74.9** |

The reported results are aggregated from 11 datasets of BEIR. We used [Pyserini](https://github.com/castorini/pyserini/) to evaluate the models. Find more in our [blog post](https://www.mixedbread.ai/blog/mxbai-rerank-v1) and on this [spreadsheet](https://docs.google.com/spreadsheets/d/15ELkSMFv-oHa5TRiIjDvhIstH9dlc3pnZeO-iGz4Ld4/edit?usp=sharing).

## Community

Please join our [Discord Community](https://discord.gg/jDfMHzAVfU) and share your feedback and thoughts! We are here to help and also always happy to chat.

## License

Apache 2.0
unsloth/Phi-3.5-mini-instruct-bnb-4bit
unsloth
"2024-08-20T23:52:52Z"
86,075
9
transformers
[ "transformers", "safetensors", "llama", "text-generation", "unsloth", "phi3", "phi", "conversational", "multilingual", "arxiv:2404.14219", "arxiv:2407.13833", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
text-generation
"2024-08-20T23:29:36Z"
---
license_link: https://huggingface.co/microsoft/Phi-3.5-mini-instruct/resolve/main/LICENSE
language:
- multilingual
library_name: transformers
license: mit
tags:
- unsloth
- transformers
- phi3
- phi
---

# Finetune Phi-3.5, Llama 3.1, Mistral 2-5x faster with 70% less memory via Unsloth!

We have a free Google Colab Tesla T4 notebook for Phi-3.5 (mini) here: https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/Discord%20button.png" width="200"/>](https://discord.gg/unsloth)
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)

## ✨ Finetune for Free

All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face. A minimal scripted alternative to the notebooks is sketched below.

| Unsloth supports | Free Notebooks | Performance | Memory use |
|-----------------|----------------|-------------|------------|
| **Llama-3.1 8b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2.4x faster | 58% less |
| **Phi-3.5 (mini)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less |
| **Gemma-2 9b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2.4x faster | 58% less |
| **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less |
| **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less |
| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less |

- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates.
- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr.
- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster.
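Outside the notebooks, loading this 4-bit checkpoint follows the usual Unsloth pattern. A minimal sketch, assuming a recent `unsloth` release; the sequence length shown is an illustrative choice, not a requirement:

```python
from unsloth import FastLanguageModel

# Load the pre-quantized bitsandbytes 4-bit checkpoint; 4-bit weights
# cut memory use at the cost of a small quantization error.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Phi-3.5-mini-instruct-bnb-4bit",
    max_seq_length=2048,  # illustrative; Phi-3.5 supports up to 128K tokens
    load_in_4bit=True,
)
```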
## Special Thanks

A huge thank you to Microsoft AI and the Phi team for creating and releasing these models.

## Model Summary

Phi-3.5-mini is a lightweight, state-of-the-art open model built upon datasets used for Phi-3 - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning-dense data. The model belongs to the Phi-3 model family and supports a 128K token context length. The model underwent a rigorous enhancement process, incorporating supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures.

🏡 [Phi-3 Portal](https://azure.microsoft.com/en-us/products/phi-3) <br>
📰 [Phi-3 Microsoft Blog](https://aka.ms/phi3.5-techblog) <br>
📖 [Phi-3 Technical Report](https://arxiv.org/abs/2404.14219) <br>
👩‍🍳 [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook) <br>
🖥️ [Try It](https://aka.ms/try-phi3.5mini) <br>

**Phi-3.5**: [[mini-instruct]](https://huggingface.co/microsoft/Phi-3.5-mini-instruct); [[MoE-instruct]](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct); [[vision-instruct]](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)

## Intended Uses

### Primary Use Cases

The model is intended for commercial and research use in multiple languages. It is suited for general purpose AI systems and applications which require:

1) Memory/compute constrained environments
2) Latency bound scenarios
3) Strong reasoning (especially code, math and logic)

Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features.

### Use Case Considerations

Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.

***Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.***

## Release Notes

This is an update over the June 2024 instruction-tuned Phi-3 Mini release, based on valuable user feedback. The model used additional post-training data, leading to substantial gains in multilingual ability, multi-turn conversation quality, and reasoning capability. We believe most use cases will benefit from this release, but we encourage users to test it in their particular AI applications. We appreciate the enthusiastic adoption of the Phi-3 model family and continue to welcome all feedback from the community.

### Multilingual

The table below highlights the multilingual capability of Phi-3.5 Mini on the multilingual MMLU, MEGA, and multilingual MMLU-pro datasets. Overall, we observed that even with just 3.8B active parameters, the model is competitive on multilingual tasks with models that have far larger active parameter counts.
| Benchmark | Phi-3.5 Mini-Ins | Phi-3.1-Mini-128K-Ins | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|----------------------------|------------------|-----------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| Multilingual MMLU | 55.4 | 51.08 | 47.4 | 58.9 | 56.2 | 63.8 | 77.2 | 72.9 |
| Multilingual MMLU-Pro | 30.9 | 30.21 | 15.0 | 34.0 | 21.4 | 43.0 | 57.9 | 53.2 |
| MGSM | 47.9 | 41.56 | 31.8 | 63.3 | 56.7 | 75.1 | 75.8 | 81.7 |
| MEGA MLQA | 61.7 | 55.5 | 43.9 | 61.2 | 45.2 | 54.4 | 61.6 | 70.0 |
| MEGA TyDi QA | 62.2 | 55.9 | 54.0 | 63.7 | 54.5 | 65.6 | 63.6 | 81.8 |
| MEGA UDPOS | 46.5 | 48.1 | 57.2 | 58.2 | 54.1 | 56.6 | 62.4 | 66.0 |
| MEGA XCOPA | 63.1 | 62.4 | 58.8 | 10.8 | 21.1 | 31.2 | 95.0 | 90.3 |
| MEGA XStoryCloze | 73.5 | 73.6 | 75.5 | 92.3 | 71.0 | 87.0 | 20.7 | 96.6 |
| **Average** | **55.2** | **52.3** | **47.9** | **55.3** | **47.5** | **59.6** | **64.3** | **76.6** |

The table below shows Multilingual MMLU scores in some of the supported languages. For more multilingual benchmarks and details, see [Appendix A](#appendix-a).

| Benchmark | Phi-3.5 Mini-Ins | Phi-3.1-Mini-128K-Ins | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|-----------|------------------|-----------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| Arabic | 44.2 | 35.4 | 33.7 | 45.3 | 49.1 | 56.3 | 73.6 | 67.1 |
| Chinese | 52.6 | 46.9 | 45.9 | 58.2 | 54.4 | 62.7 | 66.7 | 70.8 |
| Dutch | 57.7 | 48.0 | 51.3 | 60.1 | 55.9 | 66.7 | 80.6 | 74.2 |
| French | 61.1 | 61.7 | 53.0 | 63.8 | 62.8 | 67.0 | 82.9 | 75.6 |
| German | 62.4 | 61.3 | 50.1 | 64.5 | 59.9 | 65.7 | 79.5 | 74.3 |
| Italian | 62.8 | 63.1 | 52.5 | 64.1 | 55.9 | 65.7 | 82.6 | 75.9 |
| Russian | 50.4 | 45.3 | 48.9 | 59.0 | 57.4 | 63.2 | 78.7 | 72.6 |
| Spanish | 62.6 | 61.3 | 53.9 | 64.3 | 62.6 | 66.0 | 80.0 | 75.5 |
| Ukrainian | 45.2 | 36.7 | 46.9 | 56.6 | 52.9 | 62.0 | 77.4 | 72.6 |

### Long Context

Phi-3.5-mini supports a 128K context length and is therefore capable of several long-context tasks, including long document/meeting summarization, long document QA, and long document information retrieval. We see that Phi-3.5-mini is clearly better than the Gemma-2 family, which only supports an 8K context length, and is competitive with much larger open-weight models such as Llama-3.1-8B-instruct, Mistral-7B-instruct-v0.3, and Mistral-Nemo-12B-instruct-2407.
| Benchmark | Phi-3.5-mini-instruct | Llama-3.1-8B-instruct | Mistral-7B-instruct-v0.3 | Mistral-Nemo-12B-instruct-2407 | Gemini-1.5-Flash | GPT-4o-mini-2024-07-18 (Chat) |
|--|--|--|--|--|--|--|
| GovReport | 25.9 | 25.1 | 26.0 | 25.6 | 27.8 | 24.8 |
| QMSum | 21.3 | 21.6 | 21.3 | 22.1 | 24.0 | 21.7 |
| Qasper | 41.9 | 37.2 | 31.4 | 30.7 | 43.5 | 39.8 |
| SQuALITY | 25.3 | 26.2 | 25.9 | 25.8 | 23.5 | 23.8 |
| SummScreenFD | 16.0 | 17.6 | 17.5 | 18.2 | 16.3 | 17.0 |
| **Average** | **26.1** | **25.5** | **24.4** | **24.5** | **27.0** | **25.4** |

RULER: a retrieval-based benchmark for long context understanding

| Model | 4K | 8K | 16K | 32K | 64K | 128K | Average |
|--|--|--|--|--|--|--|--|
| **Phi-3.5-mini-instruct** | 94.3 | 91.1 | 90.7 | 87.1 | 78.0 | 63.6 | **84.1** |
| **Llama-3.1-8B-instruct** | 95.5 | 93.8 | 91.6 | 87.4 | 84.7 | 77.0 | **88.3** |
| **Mistral-Nemo-12B-instruct-2407** | 87.8 | 87.2 | 87.7 | 69.0 | 46.8 | 19.0 | **66.2** |

RepoQA: a benchmark for long context code understanding

| Model | Python | C++ | Rust | Java | TypeScript | Average |
|--|--|--|--|--|--|--|
| **Phi-3.5-mini-instruct** | 86 | 67 | 73 | 77 | 82 | **77** |
| **Llama-3.1-8B-instruct** | 80 | 65 | 73 | 76 | 63 | **71** |
| **Mistral-7B-instruct-v0.3** | 61 | 57 | 51 | 61 | 80 | **62** |

## Usage

### Requirements

The Phi-3 family has been integrated in the `4.43.0` version of `transformers`. The current `transformers` version can be verified with: `pip list | grep transformers`.

Examples of required packages:

```
flash_attn==2.5.8
torch==2.3.1
accelerate==0.31.0
transformers==4.43.0
```

Phi-3.5-mini-instruct is also available in [Azure AI Studio](https://aka.ms/try-phi3.5mini).

### Tokenizer

Phi-3.5-mini-instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3.5-mini-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.

### Input Formats

Given the nature of the training data, the Phi-3.5-mini-instruct model is best suited for prompts using the chat format as follows:

```
<|system|>
You are a helpful assistant.<|end|>
<|user|>
How to explain Internet for a medieval knight?<|end|>
<|assistant|>
```
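If you tokenize manually rather than relying on the `pipeline` example below, the same format can be produced from the bundled chat template via the standard `transformers` API; a minimal sketch (the message list is illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "How to explain Internet for a medieval knight?"},
]

# Renders the <|system|>/<|user|>/<|assistant|> format shown above and
# appends the generation prompt for the assistant turn.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```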
### Loading the model locally

After obtaining the Phi-3.5-mini-instruct model checkpoint, users can use this sample code for inference.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

torch.random.manual_seed(0)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-mini-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")

messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
    {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
    {"role": "user", "content": "What about solving a 2x + 3 = 7 equation?"},
]

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,
    "temperature": 0.0,
    "do_sample": False,
}

output = pipe(messages, **generation_args)
print(output[0]['generated_text'])
```

Notes: if you want to use flash attention, call `AutoModelForCausalLM.from_pretrained()` with `attn_implementation="flash_attention_2"`.
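As a concrete sketch of that note (identical to the example above except for the attention implementation; it assumes the `flash_attn` package is installed and a supported GPU, see the Hardware section):

```python
from transformers import AutoModelForCausalLM

# Same checkpoint, with flash attention enabled explicitly.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-mini-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
    attn_implementation="flash_attention_2",
)
```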
## Responsible AI Considerations

Like other language models, the Phi family of models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:

+ Quality of Service: The Phi models are trained primarily on English text and some additional multilingual text. Languages other than English will experience worse performance, as well as performance disparities across non-English languages. English language varieties with less representation in the training data might experience worse performance than standard American English.
+ Multilingual performance and safety gaps: We believe it is important to make language models more widely available across different languages, but the Phi 3 models still exhibit challenges common across multilingual releases. As with any deployment of LLMs, developers will be better positioned to test for performance or safety gaps for their linguistic and cultural context and customize the model with additional fine-tuning and appropriate safeguards.
+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups, cultural contexts, or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.
+ Inappropriate or Offensive Content: These models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.
+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.
+ Limited Scope for Code: The majority of Phi-3 training data is based in Python and uses common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses.
+ Long Conversation: Phi-3 models, like other models, can in some cases generate responses that are repetitive, unhelpful, or inconsistent in very long chat sessions in both English and non-English languages. Developers are encouraged to place appropriate mitigations, like limiting conversation turns to account for possible conversational drift.

Developers should apply responsible AI best practices, including mapping, measuring, and mitigating risks associated with their specific use case and cultural, linguistic context. The Phi-3 family of models are general purpose models. As developers plan to deploy these models for specific use cases, they are encouraged to fine-tune the models for their use case and leverage the models as part of broader AI systems with language-specific safeguards in place. Important areas for consideration include:

+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.
+ High-Risk Scenarios: Developers should assess the suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.
+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).
+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.
+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.

## Training

### Model

**Architecture:** Phi-3.5-mini has 3.8B parameters and is a dense decoder-only Transformer model using the same tokenizer as Phi-3 Mini.<br>
**Inputs:** Text. It is best suited for prompts using chat format.<br>
**Context length:** 128K tokens<br>
**GPUs:** 512 H100-80G<br>
**Training time:** 10 days<br>
**Training data:** 3.4T tokens<br>
**Outputs:** Generated text in response to the input<br>
**Dates:** Trained between June and August 2024<br>
**Status:** This is a static model trained on an offline dataset with cutoff date October 2023 for publicly available data. Future versions of the tuned models may be released as we improve models.<br>
**Supported languages:** Arabic, Chinese, Czech, Danish, Dutch, English, Finnish, French, German, Hebrew, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, Portuguese, Russian, Spanish, Swedish, Thai, Turkish, Ukrainian<br>
**Release date:** August 2024<br>

### Training Datasets

Our training data includes a wide variety of sources, totaling 3.4 trillion tokens, and is a combination of 1) publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) newly created synthetic, "textbook-like" data for the purpose of teaching math, coding, common sense reasoning, and general knowledge of the world (science, daily activities, theory of mind, etc.); 3) high quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruction-following, truthfulness, honesty and helpfulness. We focus on the quality of data that could potentially improve the reasoning ability of the model, and we filter the publicly available documents to contain the correct level of knowledge.
As an example, the result of a Premier League game on a particular day might be good training data for frontier models, but we remove such information to leave more model capacity for reasoning in small models. More details about data can be found in the [Phi-3 Technical Report](https://arxiv.org/pdf/2404.14219).

### Fine-tuning

A basic example of multi-GPU supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3.5-mini-instruct/resolve/main/sample_finetune.py).
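For orientation, a minimal single-GPU sketch of the same idea with TRL's `SFTTrainer`, assuming a recent `trl` release; the dataset and hyperparameters below are illustrative placeholders, not the settings used in the linked script:

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Illustrative chat-formatted dataset; swap in your own.
dataset = load_dataset("HuggingFaceH4/ultrachat_200k", split="train_sft")

trainer = SFTTrainer(
    model="microsoft/Phi-3.5-mini-instruct",
    train_dataset=dataset,
    args=SFTConfig(
        output_dir="phi35-mini-sft",
        per_device_train_batch_size=1,
        gradient_accumulation_steps=8,   # effective batch size of 8
        learning_rate=2e-5,
        max_seq_length=2048,             # well below the 128K limit, to keep memory modest
    ),
)
trainer.train()
```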
## Benchmarks

We report the results under completion format for Phi-3.5-mini on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mistral-7B-Instruct-v0.3, Mistral-Nemo-12B-Ins-2407, Llama-3.1-8B-Ins, Gemma-2-9B-Ins, Gemini 1.5 Flash, and GPT-4o-mini-2024-07-18 (Chat).

All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k-shot examples is listed per-benchmark.

At a high level, an overview of the model quality on representative benchmarks:

| Category | Benchmark | Phi-3.5 Mini-Ins | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|----------------|--------------------------|------------------|--------------------------|---------------------------|------------------|----------------|------------------|------------------------------|
| Popular aggregated benchmark | Arena Hard | 37 | 18.1 | 39.4 | 25.7 | 42 | 55.2 | 75 |
| | BigBench Hard CoT (0-shot) | 69 | 33.4 | 60.2 | 63.4 | 63.5 | 66.7 | 80.4 |
| | MMLU (5-shot) | 69 | 60.3 | 67.2 | 68.1 | 71.3 | 78.7 | 77.2 |
| | MMLU-Pro (0-shot, CoT) | 47.4 | 18 | 40.7 | 44 | 50.1 | 57.2 | 62.8 |
| Reasoning | ARC Challenge (10-shot) | 84.6 | 77.9 | 84.8 | 83.1 | 89.8 | 92.8 | 93.5 |
| | BoolQ (2-shot) | 78 | 80.5 | 82.5 | 82.8 | 85.7 | 85.8 | 88.7 |
| | GPQA (0-shot, CoT) | 30.4 | 15.6 | 28.6 | 26.3 | 29.2 | 37.5 | 41.1 |
| | HellaSwag (5-shot) | 69.4 | 71.6 | 76.7 | 73.5 | 80.9 | 67.5 | 87.1 |
| | OpenBookQA (10-shot) | 79.2 | 78 | 84.4 | 84.8 | 89.6 | 89 | 90 |
| | PIQA (5-shot) | 81 | 73.4 | 83.5 | 81.2 | 83.7 | 87.5 | 88.7 |
| | Social IQA (5-shot) | 74.7 | 73 | 75.3 | 71.8 | 74.7 | 77.8 | 82.9 |
| | TruthfulQA (MC2) (10-shot) | 64 | 64.7 | 68.1 | 69.2 | 76.6 | 76.6 | 78.2 |
| | WinoGrande (5-shot) | 68.5 | 58.1 | 70.4 | 64.7 | 74 | 74.7 | 76.9 |
| Multilingual | Multilingual MMLU (5-shot) | 55.4 | 47.4 | 58.9 | 56.2 | 63.8 | 77.2 | 72.9 |
| | MGSM (0-shot CoT) | 47.9 | 31.8 | 63.3 | 56.7 | 76.4 | 75.8 | 81.7 |
| Math | GSM8K (8-shot, CoT) | 86.2 | 54.4 | 84.2 | 82.4 | 84.9 | 82.4 | 91.3 |
| | MATH (0-shot, CoT) | 48.5 | 19 | 31.2 | 47.6 | 50.9 | 38 | 70.2 |
| Long context | Qasper | 41.9 | 31.4 | 30.7 | 37.2 | 13.9 | 43.5 | 39.8 |
| | SQuALITY | 24.3 | 25.9 | 25.8 | 26.2 | 0 | 23.5 | 23.8 |
| Code Generation | HumanEval (0-shot) | 62.8 | 35.4 | 63.4 | 66.5 | 61 | 74.4 | 86.6 |
| | MBPP (3-shot) | 69.6 | 50.4 | 68.1 | 69.4 | 69.3 | 77.5 | 84.1 |
| **Average** | | **61.4** | **48.5** | **61.3** | **61.0** | **63.3** | **68.5** | **74.9** |

We take a closer look at different categories across public benchmark datasets in the table below:

| Category | Phi-3.5 Mini-Ins | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|----------------------------|------------------|--------------------------|---------------------------|------------------|----------------|------------------|------------------------------|
| Popular aggregated benchmark | 55.6 | 32.5 | 51.9 | 50.3 | 56.7 | 64.5 | 73.9 |
| Reasoning | 70.1 | 65.2 | 72.2 | 70.5 | 75.4 | 77.7 | 80 |
| Language understanding | 62.6 | 62.8 | 67 | 62.9 | 72.8 | 66.6 | 76.8 |
| Robustness | 59.7 | 53.4 | 65.2 | 59.8 | 64.7 | 68.9 | 77.5 |
| Long context | 26.1 | 25.5 | 24.4 | 24.5 | 0 | 27 | 25.4 |
| Math | 67.4 | 36.7 | 57.7 | 65 | 67.9 | 60.2 | 80.8 |
| Code generation | 62 | 43.1 | 56.9 | 65.8 | 58.3 | 66.8 | 69.9 |
| Multilingual | 55.2 | 47.9 | 55.3 | 47.5 | 59.6 | 64.3 | 76.6 |

Overall, with only 3.8B parameters, the model achieves a similar level of multilingual language understanding and reasoning ability as much larger models. However, it is still fundamentally limited by its size for certain tasks. The model simply does not have the capacity to store much factual knowledge, so users may experience factual inaccuracies. We believe this weakness can be mitigated by augmenting Phi-3.5 with a search engine, particularly when using the model under RAG settings.

## Safety Evaluation and Red-Teaming

We leveraged various evaluation techniques, including red teaming, adversarial conversation simulations, and multilingual safety evaluation benchmark datasets, to evaluate Phi-3.5 models' propensity to produce undesirable outputs across multiple languages and risk categories. Several approaches were used to compensate for the limitations of any one approach alone. Findings across the various evaluation methods indicate that safety post-training, done as detailed in the [Phi-3 Safety Post-Training paper](https://arxiv.org/pdf/2407.13833), had a positive impact across multiple languages and risk categories, as observed by refusal rates (refusal to output undesirable content) and robustness to jailbreak techniques. Note, however, that while comprehensive red team evaluations were conducted across all models in the prior release of Phi models, red teaming for this release was largely focused on Phi-3.5 MoE across multiple languages and risk categories, as it is the largest and most capable of the three models. Details on prior red team evaluations across Phi models can be found in the [Phi-3 Safety Post-Training paper](https://arxiv.org/pdf/2407.13833). For this release, insights from red teaming indicate that the models may refuse to generate undesirable outputs in English even when the request for undesirable output is in another language. Models may also be more susceptible to longer multi-turn jailbreak techniques across both English and non-English languages. These findings highlight the need for industry-wide investment in the development of high-quality safety evaluation datasets across multiple languages, including low resource languages, and risk areas that account for cultural nuances where those languages are spoken.
## Software

* [PyTorch](https://github.com/pytorch/pytorch)
* [Transformers](https://github.com/huggingface/transformers)
* [Flash-Attention](https://github.com/HazyResearch/flash-attention)

## Hardware

Note that by default, the Phi-3.5-mini-instruct model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:

* NVIDIA A100
* NVIDIA A6000
* NVIDIA H100

If you want to run the model on NVIDIA V100 or earlier generation GPUs, call `AutoModelForCausalLM.from_pretrained()` with `attn_implementation="eager"`.
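A sketch of that fallback (fp16 rather than `torch_dtype="auto"`, since V100-class GPUs lack native bfloat16 support):

```python
import torch
from transformers import AutoModelForCausalLM

# Eager attention avoids the flash-attn requirement on older GPUs.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-mini-instruct",
    device_map="cuda",
    torch_dtype=torch.float16,  # V100 has no native bf16
    trust_remote_code=True,
    attn_implementation="eager",
)
```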
## License

The model is licensed under the [MIT license](./LICENSE).

## Trademarks

This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties' policies.

## Appendix A

#### MGSM

| Languages | Phi-3.5-Mini-Instruct | Phi-3.0-Mini-128k-Instruct (June2024) | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|-----------|------------------------|---------------------------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| German | 69.6 | 65.2 | 42.4 | 74.4 | 68.4 | 76.8 | 81.6 | 82.8 |
| English | 85.2 | 83.2 | 60.0 | 86.0 | 81.2 | 88.8 | 90.8 | 90.8 |
| Spanish | 79.2 | 77.6 | 46.4 | 75.6 | 66.4 | 82.4 | 84.8 | 86.8 |
| French | 71.6 | 72.8 | 47.2 | 70.4 | 66.8 | 74.4 | 77.2 | 81.6 |
| Japanese | 50.0 | 35.2 | 22.8 | 62.4 | 49.2 | 67.6 | 77.6 | 80.4 |
| Russian | 67.2 | 51.6 | 43.2 | 73.6 | 67.2 | 78.4 | 84.8 | 86.4 |
| Thai | 29.6 | 6.4 | 18.4 | 53.2 | 56.0 | 76.8 | 87.6 | 81.6 |
| Chinese | 60.0 | 52.8 | 42.4 | 66.4 | 68.0 | 72.8 | 82.0 | 82.0 |

#### Multilingual MMLU-pro

| Languages | Phi-3.5-Mini-Instruct | Phi-3.0-Mini-128k-Instruct (June2024) | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|------------|-----------------------|---------------------------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| Czech | 24.9 | 26.3 | 14.6 | 30.6 | 23.0 | 40.5 | 59.0 | 40.9 |
| English | 47.7 | 46.2 | 17.7 | 39.8 | 43.1 | 49.0 | 66.1 | 62.7 |
| Finnish | 22.3 | 20.5 | 11.5 | 30.4 | 9.7 | 37.5 | 54.5 | 50.1 |
| Norwegian | 29.9 | 27.8 | 14.4 | 33.2 | 22.2 | 44.4 | 60.7 | 59.1 |
| Polish | 25.7 | 26.4 | 16.3 | 33.6 | 9.2 | 41.7 | 53.9 | 42.8 |
| Portuguese | 38.7 | 37.6 | 15.3 | 36.0 | 29.3 | 43.5 | 54.0 | 56.9 |
| Swedish | 30.7 | 28.1 | 15.5 | 34.3 | 16.9 | 42.6 | 57.7 | 55.5 |

#### MEGA

##### MLQA

| Languages | Phi-3.5-Mini-Instruct | Phi-3.0-Mini-128k-Instruct (June2024) | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|-----------|-----------------------|---------------------------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| Arabic | 54.3 | 32.7 | 23.5 | 31.4 | 31.5 | 57.4 | 63.8 | 64.0 |
| Chinese | 36.1 | 31.8 | 22.4 | 27.4 | 18.6 | 45.4 | 38.1 | 38.9 |
| English | 80.3 | 78.9 | 68.2 | 75.5 | 67.2 | 82.9 | 69.5 | 82.2 |
| German | 61.8 | 59.1 | 49.0 | 57.8 | 38.9 | 63.8 | 55.9 | 64.1 |
| Spanish | 68.8 | 67.0 | 50.3 | 63.6 | 52.7 | 72.8 | 59.6 | 70.1 |

##### TyDi QA

| Languages | Phi-3.5-Mini-Instruct | Phi-3.0-Mini-128k-Instruct (June2024) | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|-----------|-----------------------|---------------------------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| Arabic | 69.7 | 54.4 | 52.5 | 49.8 | 33.7 | 81.1 | 78.8 | 84.9 |
| English | 82.0 | 82.0 | 60.5 | 77.3 | 65.1 | 82.4 | 60.9 | 81.8 |
| Finnish | 70.3 | 64.3 | 68.6 | 57.1 | 74.4 | 85.7 | 73.5 | 84.8 |
| Japanese | 65.4 | 56.7 | 45.3 | 54.8 | 34.1 | 74.6 | 59.7 | 73.3 |
| Korean | 74.0 | 60.4 | 54.5 | 54.2 | 54.9 | 83.8 | 60.7 | 82.3 |
| Russian | 63.5 | 62.7 | 52.3 | 55.7 | 27.4 | 69.8 | 60.1 | 72.5 |
| Thai | 64.4 | 49.0 | 51.8 | 43.5 | 48.5 | 81.4 | 71.6 | 78.2 |

##### XCOPA

| Languages | Phi-3.5-Mini-Instruct | Phi-3.0-Mini-128k-Instruct (June2024) | Mistral-7B-Instruct-v0.3 | Mistral-Nemo-12B-Ins-2407 | Llama-3.1-8B-Ins | Gemma-2-9B-Ins | Gemini 1.5 Flash | GPT-4o-mini-2024-07-18 (Chat) |
|-----------|-----------------------|---------------------------------------|--------------------------|---------------------------|------------------|----------------|------------------|-------------------------------|
| English | 94.6 | 94.6 | 85.6 | 94.4 | 37.6 | 63.8 | 92.0 | 98.2 |
| Italian | 86.8 | 84.8 | 76.8 | 83.2 | 16.2 | 37.2 | 85.6 | 97.6 |
| Turkish | 58.6 | 57.2 | 61.6 | 56.6 | 38.4 | 60.2 | 91.4 | 94.6 |
hubertsiuzdak/snac_44khz
hubertsiuzdak
"2024-04-03T23:49:23Z"
85,687
5
transformers
[ "transformers", "pytorch", "audio", "license:mit", "endpoints_compatible", "region:us" ]
null
"2024-02-20T01:29:10Z"
---
license: mit
tags:
- audio
---

# SNAC 🍿

Multi-**S**cale **N**eural **A**udio **C**odec (SNAC) compresses audio into discrete codes at a low bitrate.

👉 This model was primarily trained on music data, and its recommended use case is music (and SFX) generation. See below for other pretrained models.

🔗 GitHub repository: https://github.com/hubertsiuzdak/snac/

## Overview

SNAC encodes audio into hierarchical tokens, similarly to SoundStream, EnCodec, and DAC. However, SNAC introduces a simple change: coarse tokens are sampled less frequently, covering a broader time span.

This model compresses 44 kHz audio into discrete codes at a 2.6 kbps bitrate. It uses 4 RVQ levels with token rates of 14, 29, 57, and 115 Hz. These figures are consistent: the four levels together emit 14 + 29 + 57 + 115 = 215 tokens per second, which at 12 bits per token (assuming a 4096-entry codebook) works out to roughly 2.6 kbps.

## Pretrained models

Currently, all models support only a single audio channel (mono).

| Model | Bitrate | Sample Rate | Params | Recommended use case |
|-----------------------------------------------------------------------------|-----------|-------------|--------|--------------------------|
| [hubertsiuzdak/snac_24khz](https://huggingface.co/hubertsiuzdak/snac_24khz) | 0.98 kbps | 24 kHz | 19.8 M | 🗣️ Speech |
| [hubertsiuzdak/snac_32khz](https://huggingface.co/hubertsiuzdak/snac_32khz) | 1.9 kbps | 32 kHz | 54.5 M | 🎸 Music / Sound Effects |
| hubertsiuzdak/snac_44khz (this model) | 2.6 kbps | 44 kHz | 54.5 M | 🎸 Music / Sound Effects |

## Usage

Install it using:

```bash
pip install snac
```

To encode (and decode) audio with SNAC in Python, use the following code:

```python
import torch
from snac import SNAC

model = SNAC.from_pretrained("hubertsiuzdak/snac_44khz").eval().cuda()
audio = torch.randn(1, 1, 44100).cuda()  # B, 1, T

with torch.inference_mode():
    codes = model.encode(audio)
    audio_hat = model.decode(codes)
```

You can also encode and reconstruct in a single call:

```python
with torch.inference_mode():
    audio_hat, codes = model(audio)
```

⚠️ Note that `codes` is a list of token sequences of variable lengths, each corresponding to a different temporal resolution.

```
>>> [code.shape[1] for code in codes]
[16, 32, 64, 128]
```

## Acknowledgements

Module definitions are adapted from the [Descript Audio Codec](https://github.com/descriptinc/descript-audio-codec).
immich-app/ViT-B-32__openai
immich-app
"2024-07-22T14:29:08Z"
85,593
7
transformers
[ "transformers", "onnx", "immich", "clip", "endpoints_compatible", "region:us" ]
null
"2023-10-28T00:34:04Z"
---
tags:
- immich
- clip
---

# Model Description

This repo contains ONNX exports for the CLIP model [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32). The visual and textual encoders are exported as separate models so that image and text embeddings can be generated independently. This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
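For illustration only, here is a hypothetical sketch of computing an image embedding with `onnxruntime`. The file name `visual/model.onnx`, the 224×224 input size, and the CLIP normalization constants are assumptions about the export, not guarantees from this card; Immich handles all of this internally.

```python
import numpy as np
import onnxruntime as ort
from PIL import Image

# Assumed location of the exported visual encoder within this repo.
session = ort.InferenceSession("visual/model.onnx")

# Standard CLIP preprocessing (assumed): resize, scale to [0, 1], normalize, NCHW.
image = Image.open("photo.jpg").convert("RGB").resize((224, 224))
pixels = np.asarray(image, dtype=np.float32) / 255.0
mean = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
std = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)
pixels = ((pixels - mean) / std).transpose(2, 0, 1)[None]

embedding = session.run(None, {session.get_inputs()[0].name: pixels})[0]
print(embedding.shape)  # e.g. (1, 512) for ViT-B/32
```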
anuragshas/wav2vec2-large-xlsr-53-telugu
anuragshas
"2021-07-05T21:31:14Z"
85,590
5
transformers
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "te", "dataset:openslr", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2022-03-02T23:29:05Z"
---
language: te
datasets:
- openslr
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: Anurag Singh XLSR Wav2Vec2 Large 53 Telugu
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: OpenSLR te
      type: openslr
      args: te
    metrics:
    - name: Test WER
      type: wer
      value: 44.98
---

# Wav2Vec2-Large-XLSR-53-Telugu

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Telugu using the [OpenSLR SLR66](http://openslr.org/66/) dataset. When using this model, make sure that your speech input is sampled at 16 kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import Dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import pandas as pd

# Evaluation notebook contains the procedure to download the data
df = pd.read_csv("/content/te/test.tsv", sep="\t")
df["path"] = "/content/te/clips/" + df["path"]
test_dataset = Dataset.from_pandas(df)

processor = Wav2Vec2Processor.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
model = Wav2Vec2ForCTC.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

```python
import torch
import torchaudio
from datasets import Dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
import pandas as pd

# Evaluation notebook contains the procedure to download the data
df = pd.read_csv("/content/te/test.tsv", sep="\t")
df["path"] = "/content/te/clips/" + df["path"]
test_dataset = Dataset.from_pandas(df)
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
model = Wav2Vec2ForCTC.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\_\;\:\"\“\%\‘\”\।\’\'\&]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

def normalizer(text):
    # Use your custom normalizer
    text = text.replace("\\n", "\n")
    text = ' '.join(text.split())
    text = re.sub(r'''([a-z]+)''', '', text, flags=re.IGNORECASE)
    text = re.sub(r'''%''', " శాతం ", text)
    text = re.sub(r'''(/|-|_)''', " ", text)
    text = re.sub("ై", "ై", text)
    text = text.strip()
    return text

def speech_file_to_array_fn(batch):
    batch["sentence"] = normalizer(batch["sentence"])
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " "
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 44.98%

## Training

70% of the OpenSLR Telugu dataset was used for training.

Train split of annotations is [here](https://www.dropbox.com/s/xqc0wtour7f9h4c/train.tsv).
Test split of annotations is [here](https://www.dropbox.com/s/qw1uy63oj4qdiu4/test.tsv).
The training data preparation notebook can be found [here](https://colab.research.google.com/drive/1_VR1QtY9qoiabyXBdJcOI29-xIKGdIzU?usp=sharing).
The training notebook can be found [here](https://colab.research.google.com/drive/14N-j4m0Ng_oktPEBN5wiUhDDbyrKYt8I?usp=sharing).
The evaluation notebook is [here](https://colab.research.google.com/drive/1SLEvbTWBwecIRTNqpQ0fFTqmr1-7MnSI?usp=sharing).
latent-consistency/lcm-lora-sdxl
latent-consistency
"2023-11-24T13:31:08Z"
84,690
719
diffusers
[ "diffusers", "lora", "text-to-image", "arxiv:2311.05556", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "region:us" ]
text-to-image
"2023-11-09T00:34:02Z"
---
library_name: diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
tags:
- lora
- text-to-image
license: openrail++
inference: false
---

# Latent Consistency Model (LCM) LoRA: SDXL

Latent Consistency Model (LCM) LoRA was proposed in [LCM-LoRA: A universal Stable-Diffusion Acceleration Module](https://arxiv.org/abs/2311.05556) by *Simian Luo, Yiqin Tan, Suraj Patil, Daniel Gu et al.*

It is a distilled consistency adapter for [`stable-diffusion-xl-base-1.0`](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) that reduces the number of inference steps to only **2 - 8 steps**.

| Model | Params / M |
|----------------------------------------------------------------------------|------------|
| [lcm-lora-sdv1-5](https://huggingface.co/latent-consistency/lcm-lora-sdv1-5) | 67.5 |
| [lcm-lora-ssd-1b](https://huggingface.co/latent-consistency/lcm-lora-ssd-1b) | 105 |
| [**lcm-lora-sdxl**](https://huggingface.co/latent-consistency/lcm-lora-sdxl) | **197** |

## Usage

LCM-LoRA is supported in the 🤗 Hugging Face Diffusers library from version v0.23.0 onwards. To run the model, first install the latest version of the Diffusers library as well as `peft`, `accelerate` and `transformers`:

```bash
pip install --upgrade pip
pip install --upgrade diffusers transformers accelerate peft
```

***Note: For detailed usage examples we recommend you to check out our official [LCM-LoRA docs](https://huggingface.co/docs/diffusers/main/en/using-diffusers/inference_with_lcm_lora)***

### Text-to-Image

The adapter can be loaded with its base model `stabilityai/stable-diffusion-xl-base-1.0`. Next, the scheduler needs to be changed to [`LCMScheduler`](https://huggingface.co/docs/diffusers/v0.22.3/en/api/schedulers/lcm#diffusers.LCMScheduler) and we can reduce the number of inference steps to just 2 to 8. Please make sure to either disable `guidance_scale` or use values between 1.0 and 2.0.

```python
import torch
from diffusers import LCMScheduler, AutoPipelineForText2Image

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
adapter_id = "latent-consistency/lcm-lora-sdxl"

pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# load and fuse lcm lora
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()

prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"

# disable guidance_scale by passing 0
image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0).images[0]
```

![](./image.png)
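Because the snippet above fuses the LoRA weights into the base model, you may want to undo that later (for example, to sample with an ordinary SDXL scheduler again). A minimal continuation of the snippet above, assuming diffusers v0.23+ where both helpers are available:

```python
# Restore the original SDXL weights once you are done with LCM sampling.
pipe.unfuse_lora()
pipe.unload_lora_weights()
```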
### Inpainting

LCM-LoRA can be used for inpainting as well.

```python
import torch
from diffusers import AutoPipelineForInpainting, LCMScheduler
from diffusers.utils import load_image, make_image_grid

pipe = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()

# load base and mask image
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").resize((1024, 1024))
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").resize((1024, 1024))

prompt = "a castle on top of a mountain, highly detailed, 8k"
generator = torch.manual_seed(42)
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    generator=generator,
    num_inference_steps=5,
    guidance_scale=4,
).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_inpainting.png)

## Combine with styled LoRAs

LCM-LoRA can be combined with other LoRAs to generate styled images in very few steps (4-8). In the following example, we'll use the LCM-LoRA with the [papercut LoRA](https://huggingface.co/TheLastBen/Papercut_SDXL). To learn more about how to combine LoRAs, refer to [this guide](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference#combine-multiple-adapters).

```python
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    torch_dtype=torch.float16
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LoRAs
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")

# Combine LoRAs
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])

prompt = "papercut, a cute fox"
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0]
image
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdx_lora_mix.png)

### ControlNet

```python
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image, make_image_grid

image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((1024, 1024))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0-small", torch_dtype=torch.float16, variant="fp16")
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()

generator = torch.manual_seed(0)
image = pipe(
    "picture of the mona lisa",
    image=canny_image,
    num_inference_steps=5,
    guidance_scale=1.5,
    controlnet_conditioning_scale=0.5,
    cross_attention_kwargs={"scale": 1},
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_controlnet.png)

<Tip>

The inference parameters in this example might not work for all examples, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale` and `cross_attention_kwargs` parameters and choosing the best ones.

</Tip>

### T2I Adapter

This example shows how to use the LCM-LoRA with the [Canny T2I-Adapter](https://huggingface.co/TencentARC/t2i-adapter-canny-sdxl-1.0) and SDXL.

```python
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# Prepare image
# Detect the canny map in low resolution to avoid high-frequency details
image = load_image(
    "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg"
).resize((384, 384))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image).resize((1024, 1024))

# load adapter
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16").to("cuda")

pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    adapter=adapter,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "Mystical fairy in real, magic, 4k picture, high quality"
negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    adapter_conditioning_scale=0.8,
    adapter_conditioning_factor=1,
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_t2iadapter.png)

## Speed Benchmark

TODO

## Training

TODO
prithivida/grammar_error_correcter_v1
prithivida
"2021-07-04T10:44:31Z"
84,663
36
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
"2022-03-02T23:29:05Z"
**This model is part of the Gramformer library.** Please refer to https://github.com/PrithivirajDamodaran/Gramformer/ for usage details.
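The Gramformer repository is the supported entry point, but since this is a standard T5 `text2text-generation` checkpoint it can also be called directly with `transformers`. A hypothetical sketch; the `gec:` task prefix is an assumption based on the Gramformer source, not documented on this card:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "prithivida/grammar_error_correcter_v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# "gec: " is the correction prefix used by the Gramformer library (assumption).
inputs = tokenizer("gec: He are moving here.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # e.g. "He is moving here."
```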
jinaai/jina-bert-flash-implementation
jinaai
"2024-05-31T12:41:05Z"
84,585
4
transformers
[ "transformers", "bert", "custom_code", "endpoints_compatible", "region:eu" ]
null
"2024-02-21T11:19:46Z"
# BERT with Flash-Attention

### Installing dependencies

To run the model on GPU, you need to install Flash Attention. You may either install it from PyPI (which may not work with fused-dense) or from source. To install from source, clone the GitHub repository:

```console
git clone git@github.com:Dao-AILab/flash-attention.git
```

The code provided here should work with commit `43950dd`. Change to the cloned repo and install:

```console
cd flash-attention && python setup.py install
```

This will compile the flash-attention kernel, which will take some time.

If you would like to use fused MLPs (e.g. to use activation checkpointing), you may also install fused-dense from source:

```console
cd csrc/fused_dense_lib && python setup.py install
```

### Configuration

The config adds some new parameters (a loading sketch follows the list):

- `use_flash_attn`: If `True`, always use flash attention. If `None`, use flash attention when a GPU is available. If `False`, never use flash attention (works on CPU).
- `window_size`: Size (left and right) of the local attention window. If `(-1, -1)`, use global attention.
- `dense_seq_output`: If `True`, only the hidden states of the masked-out tokens (around 15%) are passed to the classifier heads. I set this to `True` for pretraining.
- `fused_mlp`: Whether to use fused-dense. Useful to reduce VRAM in combination with activation checkpointing.
- `mlp_checkpoint_lvl`: One of `{0, 1, 2}`. Increasing this increases the amount of activation checkpointing within the MLP. Keep this at 0 for pretraining and use gradient accumulation instead. For embedding training, increase this as much as needed.
- `last_layer_subset`: If `True`, only the last layer is computed for a subset of tokens. I left this set to `False`.
- `use_qk_norm`: Whether or not to use QK-normalization.
- `num_loras`: Number of LoRAs to use when initializing a `BertLoRA` model. Has no effect on other models.
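A minimal sketch tying these options together. Whether this repo ships standalone weights is not stated here, so the sketch only instantiates the architecture from its config with custom code enabled; the field names come from the list above:

```python
from transformers import AutoConfig, AutoModel

repo = "jinaai/jina-bert-flash-implementation"

config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
config.use_flash_attn = False   # run without flash attention (e.g. CPU-only)
config.window_size = (-1, -1)   # global attention instead of a local window

# Build a randomly initialized model from the custom architecture; load real
# weights from a checkpoint repo that uses this implementation if you have one.
model = AutoModel.from_config(config, trust_remote_code=True)
```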
BAAI/bge-multilingual-gemma2
BAAI
"2024-07-31T08:07:09Z"
84,526
93
sentence-transformers
[ "sentence-transformers", "safetensors", "gemma2", "feature-extraction", "sentence-similarity", "transformers", "mteb", "arxiv:2402.03216", "arxiv:2309.07597", "license:gemma", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
feature-extraction
"2024-07-25T16:55:46Z"
--- tags: - feature-extraction - sentence-similarity - sentence-transformers - transformers - mteb license: gemma model-index: - name: bge-multilingual-gemma2 results: - task: type: Retrieval dataset: type: mteb/nfcorpus name: MTEB NFCorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: main_score value: 38.11433513284057 - type: ndcg_at_1 value: 48.45201238390093 - type: ndcg_at_3 value: 44.451438575534574 - type: ndcg_at_5 value: 41.13929990797894 - type: ndcg_at_10 value: 38.11433513284057 - type: ndcg_at_100 value: 35.36065387898559 - type: ndcg_at_1000 value: 44.01125752781003 - type: map_at_1 value: 5.638004398054564 - type: map_at_3 value: 10.375632572339333 - type: map_at_5 value: 11.820531148202422 - type: map_at_10 value: 14.087436978063389 - type: map_at_100 value: 18.25397463114958 - type: map_at_1000 value: 19.868440221606203 - type: precision_at_1 value: 49.84520123839009 - type: precision_at_3 value: 41.89886480908153 - type: precision_at_5 value: 35.356037151702814 - type: precision_at_10 value: 28.513931888544857 - type: precision_at_100 value: 9.337461300309604 - type: precision_at_1000 value: 2.210216718266251 - type: recall_at_1 value: 5.638004398054564 - type: recall_at_3 value: 11.938154656310312 - type: recall_at_5 value: 14.06183119422843 - type: recall_at_10 value: 18.506397834147705 - type: recall_at_100 value: 35.96995569451433 - type: recall_at_1000 value: 68.31771509404795 - task: type: Retrieval dataset: type: mteb/msmarco name: MTEB MSMARCO config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: main_score value: 45.70688915742828 - type: ndcg_at_1 value: 26.002865329512893 - type: ndcg_at_3 value: 37.49665652114275 - type: ndcg_at_5 value: 41.684045067615834 - type: ndcg_at_10 value: 45.70688915742828 - type: ndcg_at_100 value: 51.08932609519671 - type: ndcg_at_1000 value: 51.98806137292924 - type: map_at_1 value: 25.35219675262655 - type: map_at_3 value: 34.39549506526583 - type: map_at_5 value: 36.74936326010824 - type: map_at_10 value: 38.44429852488596 - type: map_at_100 value: 39.60260286311527 - type: map_at_1000 value: 39.64076154054021 - type: precision_at_1 value: 26.002865329512893 - type: precision_at_3 value: 15.840496657115954 - type: precision_at_5 value: 11.647564469914684 - type: precision_at_10 value: 7.1275071633243705 - type: precision_at_100 value: 0.9782234957019871 - type: precision_at_1000 value: 0.10565902578797497 - type: recall_at_1 value: 25.35219675262655 - type: recall_at_3 value: 45.78438395415474 - type: recall_at_5 value: 55.83213944603631 - type: recall_at_10 value: 68.08500477554918 - type: recall_at_100 value: 92.55133715377269 - type: recall_at_1000 value: 99.29083094555875 - task: type: Retrieval dataset: type: mteb/fiqa name: MTEB FiQA2018 config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: main_score value: 60.04205769404706 - type: ndcg_at_1 value: 59.25925925925925 - type: ndcg_at_3 value: 55.96637679199298 - type: ndcg_at_5 value: 56.937223390223956 - type: ndcg_at_10 value: 60.04205769404706 - type: ndcg_at_100 value: 66.01619664462949 - type: ndcg_at_1000 value: 67.59651529720728 - type: map_at_1 value: 31.5081163692275 - type: map_at_3 value: 45.7486689836227 - type: map_at_5 value: 48.944906602314 - type: map_at_10 value: 51.85427043799874 - type: map_at_100 value: 53.92920237379484 - type: map_at_1000 value: 54.04694438963671 - type: precision_at_1 value: 59.25925925925925 - type: 
precision_at_3 value: 37.44855967078195 - type: precision_at_5 value: 26.913580246913547 - type: precision_at_10 value: 16.52777777777774 - type: precision_at_100 value: 2.2962962962962754 - type: precision_at_1000 value: 0.2566358024691334 - type: recall_at_1 value: 31.5081163692275 - type: recall_at_3 value: 50.71759045138676 - type: recall_at_5 value: 57.49321152098932 - type: recall_at_10 value: 67.36356750245642 - type: recall_at_100 value: 88.67335767798735 - type: recall_at_1000 value: 97.83069725199356 - task: type: Retrieval dataset: type: mteb/scidocs name: MTEB SCIDOCS config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: main_score value: 26.93150756480961 - type: ndcg_at_1 value: 30.8 - type: ndcg_at_3 value: 25.048085553386628 - type: ndcg_at_5 value: 22.351207380852305 - type: ndcg_at_10 value: 26.93150756480961 - type: ndcg_at_100 value: 37.965486832874014 - type: ndcg_at_1000 value: 43.346046425140244 - type: map_at_1 value: 6.238333333333366 - type: map_at_3 value: 11.479166666666679 - type: map_at_5 value: 14.215999999999983 - type: map_at_10 value: 16.774632936507945 - type: map_at_100 value: 20.148869158557293 - type: map_at_1000 value: 20.528644104490823 - type: precision_at_1 value: 30.8 - type: precision_at_3 value: 23.466666666666736 - type: precision_at_5 value: 19.899999999999967 - type: precision_at_10 value: 14.069999999999938 - type: precision_at_100 value: 2.9770000000000065 - type: precision_at_1000 value: 0.42569999999999486 - type: recall_at_1 value: 6.238333333333366 - type: recall_at_3 value: 14.29333333333338 - type: recall_at_5 value: 20.206666666666628 - type: recall_at_10 value: 28.573333333333224 - type: recall_at_100 value: 60.43666666666675 - type: recall_at_1000 value: 86.3649999999997 - task: type: Retrieval dataset: type: mteb/fever name: MTEB FEVER config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: main_score value: 90.38165339181239 - type: ndcg_at_1 value: 84.86348634863486 - type: ndcg_at_3 value: 88.98667069230609 - type: ndcg_at_5 value: 89.86028996734895 - type: ndcg_at_10 value: 90.38165339181239 - type: ndcg_at_100 value: 90.99655378684439 - type: ndcg_at_1000 value: 91.15536362599602 - type: map_at_1 value: 78.8556296105801 - type: map_at_3 value: 86.24061810942983 - type: map_at_5 value: 86.94776680048933 - type: map_at_10 value: 87.26956235873007 - type: map_at_100 value: 87.47986397174834 - type: map_at_1000 value: 87.4897076664281 - type: precision_at_1 value: 84.86348634863486 - type: precision_at_3 value: 34.02340234023296 - type: precision_at_5 value: 21.10411041104359 - type: precision_at_10 value: 10.828082808282083 - type: precision_at_100 value: 1.1381638163816703 - type: precision_at_1000 value: 0.11662166216622569 - type: recall_at_1 value: 78.8556296105801 - type: recall_at_3 value: 92.34465708475605 - type: recall_at_5 value: 94.58010682020583 - type: recall_at_10 value: 96.10713452297611 - type: recall_at_100 value: 98.31672452959585 - type: recall_at_1000 value: 99.25967001462051 - task: type: Retrieval dataset: type: mteb/arguana name: MTEB ArguAna config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: main_score value: 77.36555747844541 - type: ndcg_at_1 value: 57.681365576102415 - type: ndcg_at_3 value: 72.01664798084765 - type: ndcg_at_5 value: 75.26345973082836 - type: ndcg_at_10 value: 77.36555747844541 - type: ndcg_at_100 value: 78.15567833673768 - type: ndcg_at_1000 value: 
78.16528851292641 - type: map_at_1 value: 57.681365576102415 - type: map_at_3 value: 68.59886201991475 - type: map_at_5 value: 70.38051209103858 - type: map_at_10 value: 71.26684955632336 - type: map_at_100 value: 71.4637216600468 - type: map_at_1000 value: 71.46414501573332 - type: precision_at_1 value: 57.681365576102415 - type: precision_at_3 value: 27.287814129919084 - type: precision_at_5 value: 17.965860597439132 - type: precision_at_10 value: 9.623044096728066 - type: precision_at_100 value: 0.995732574679925 - type: precision_at_1000 value: 0.09964438122332549 - type: recall_at_1 value: 57.681365576102415 - type: recall_at_3 value: 81.86344238975818 - type: recall_at_5 value: 89.82930298719772 - type: recall_at_10 value: 96.23044096728307 - type: recall_at_100 value: 99.57325746799431 - type: recall_at_1000 value: 99.6443812233286 - task: type: Retrieval dataset: type: mteb/scifact name: MTEB SciFact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: main_score value: 72.0465439956427 - type: ndcg_at_1 value: 58.666666666666664 - type: ndcg_at_3 value: 66.84566274610046 - type: ndcg_at_5 value: 69.46578881873717 - type: ndcg_at_10 value: 72.0465439956427 - type: ndcg_at_100 value: 74.25705461923272 - type: ndcg_at_1000 value: 74.63689058493014 - type: map_at_1 value: 55.59444444444445 - type: map_at_3 value: 63.71851851851852 - type: map_at_5 value: 65.5362962962963 - type: map_at_10 value: 66.84112433862435 - type: map_at_100 value: 67.36269426417417 - type: map_at_1000 value: 67.37568665562833 - type: precision_at_1 value: 58.666666666666664 - type: precision_at_3 value: 26.444444444444425 - type: precision_at_5 value: 17.66666666666672 - type: precision_at_10 value: 9.866666666666706 - type: precision_at_100 value: 1.0966666666666596 - type: precision_at_1000 value: 0.11266666666666675 - type: recall_at_1 value: 55.59444444444445 - type: recall_at_3 value: 72.72777777777777 - type: recall_at_5 value: 79.31666666666666 - type: recall_at_10 value: 86.75 - type: recall_at_100 value: 96.66666666666667 - type: recall_at_1000 value: 99.66666666666667 - task: type: Retrieval dataset: type: mteb/trec-covid name: MTEB TRECCOVID config: default split: test revision: bb9466bac8153a0349341eb1b22e06409e78ef4e metrics: - type: main_score value: 64.26928884606035 - type: ndcg_at_1 value: 63.0 - type: ndcg_at_3 value: 64.18432764386345 - type: ndcg_at_5 value: 64.73235515799435 - type: ndcg_at_10 value: 64.26928884606035 - type: ndcg_at_100 value: 52.39807133285409 - type: ndcg_at_1000 value: 52.19937563361241 - type: map_at_1 value: 0.18483494997310454 - type: map_at_3 value: 0.5139705769331114 - type: map_at_5 value: 0.8245601222717243 - type: map_at_10 value: 1.5832530269558573 - type: map_at_100 value: 9.664760850102393 - type: map_at_1000 value: 25.568347406468334 - type: precision_at_1 value: 70.0 - type: precision_at_3 value: 71.33333333333333 - type: precision_at_5 value: 71.60000000000001 - type: precision_at_10 value: 70.99999999999996 - type: precision_at_100 value: 55.140000000000015 - type: precision_at_1000 value: 23.857999999999997 - type: recall_at_1 value: 0.18483494997310454 - type: recall_at_3 value: 0.5584287301859913 - type: recall_at_5 value: 0.9489025953807098 - type: recall_at_10 value: 1.9023711039425688 - type: recall_at_100 value: 13.596810701594226 - type: recall_at_1000 value: 50.92058432920189 - task: type: Retrieval dataset: type: mteb/climate-fever name: MTEB ClimateFEVER config: default split: test revision: 
47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: main_score value: 39.37204193531481 - type: ndcg_at_1 value: 35.11400651465798 - type: ndcg_at_3 value: 32.36672790229743 - type: ndcg_at_5 value: 34.79369234162357 - type: ndcg_at_10 value: 39.37204193531481 - type: ndcg_at_100 value: 47.544500439419124 - type: ndcg_at_1000 value: 50.305733346049855 - type: map_at_1 value: 15.516829533116216 - type: map_at_3 value: 23.73669923995656 - type: map_at_5 value: 26.43208469055373 - type: map_at_10 value: 28.912036175309773 - type: map_at_100 value: 31.413762299240894 - type: map_at_1000 value: 31.596796093997014 - type: precision_at_1 value: 35.11400651465798 - type: precision_at_3 value: 24.994571118349487 - type: precision_at_5 value: 19.231270358305956 - type: precision_at_10 value: 12.690553745928165 - type: precision_at_100 value: 2.1576547231270466 - type: precision_at_1000 value: 0.2676221498371306 - type: recall_at_1 value: 15.516829533116216 - type: recall_at_3 value: 29.994571118349512 - type: recall_at_5 value: 37.14223669923993 - type: recall_at_10 value: 47.29207383279043 - type: recall_at_100 value: 74.37133550488598 - type: recall_at_1000 value: 89.41585233441913 - task: type: Retrieval dataset: type: mteb/hotpotqa name: MTEB HotpotQA config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: main_score value: 83.26282954330777 - type: ndcg_at_1 value: 87.5489534098582 - type: ndcg_at_3 value: 78.7646435855166 - type: ndcg_at_5 value: 81.41629077444277 - type: ndcg_at_10 value: 83.26282954330777 - type: ndcg_at_100 value: 85.2771369900158 - type: ndcg_at_1000 value: 85.77519303747493 - type: map_at_1 value: 43.7744767049291 - type: map_at_3 value: 73.4661264911093 - type: map_at_5 value: 75.7169705154168 - type: map_at_10 value: 76.89183627536043 - type: map_at_100 value: 77.53680315727078 - type: map_at_1000 value: 77.5649311522075 - type: precision_at_1 value: 87.5489534098582 - type: precision_at_3 value: 51.74881836596788 - type: precision_at_5 value: 33.13977042539127 - type: precision_at_10 value: 17.492234976369023 - type: precision_at_100 value: 1.9030384875084312 - type: precision_at_1000 value: 0.19679945982446267 - type: recall_at_1 value: 43.7744767049291 - type: recall_at_3 value: 77.62322754895341 - type: recall_at_5 value: 82.84942606347063 - type: recall_at_10 value: 87.4611748818366 - type: recall_at_100 value: 95.15192437542201 - type: recall_at_1000 value: 98.39972991222147 - task: type: Retrieval dataset: type: mteb/nq name: MTEB NQ config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: main_score value: 71.44670934705796 - type: ndcg_at_1 value: 54.026651216685984 - type: ndcg_at_3 value: 65.1267452491225 - type: ndcg_at_5 value: 68.6696802020747 - type: ndcg_at_10 value: 71.44670934705796 - type: ndcg_at_100 value: 73.74642927386503 - type: ndcg_at_1000 value: 73.90908268307331 - type: map_at_1 value: 48.50086906141366 - type: map_at_3 value: 61.07691193510995 - type: map_at_5 value: 63.36580243337187 - type: map_at_10 value: 64.74485498782997 - type: map_at_100 value: 65.34329174534082 - type: map_at_1000 value: 65.35107870745652 - type: precision_at_1 value: 54.026651216685984 - type: precision_at_3 value: 28.437620702974996 - type: precision_at_5 value: 19.20625724217861 - type: precision_at_10 value: 10.67207415990753 - type: precision_at_100 value: 1.1987253765932955 - type: precision_at_1000 value: 0.12143684820393259 - type: recall_at_1 value: 48.50086906141366 - 
type: recall_at_3 value: 73.19428350714561 - type: recall_at_5 value: 81.19689069138664 - type: recall_at_10 value: 89.04741212823485 - type: recall_at_100 value: 98.58053302433372 - type: recall_at_1000 value: 99.75376593279258 - task: type: Retrieval dataset: type: mteb/quora name: MTEB QuoraRetrieval config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: main_score value: 90.03760323006117 - type: ndcg_at_1 value: 83.53 - type: ndcg_at_3 value: 87.53800795646302 - type: ndcg_at_5 value: 88.92909168525203 - type: ndcg_at_10 value: 90.03760323006117 - type: ndcg_at_100 value: 91.08558507332712 - type: ndcg_at_1000 value: 91.1430039358834 - type: map_at_1 value: 72.61760432018744 - type: map_at_3 value: 83.8457060028347 - type: map_at_5 value: 85.6228412692169 - type: map_at_10 value: 86.67700531365115 - type: map_at_100 value: 87.29851728827602 - type: map_at_1000 value: 87.31014621733333 - type: precision_at_1 value: 83.53 - type: precision_at_3 value: 38.33666666667159 - type: precision_at_5 value: 25.12599999999881 - type: precision_at_10 value: 13.629999999998683 - type: precision_at_100 value: 1.5431999999999773 - type: precision_at_1000 value: 0.15671999999997974 - type: recall_at_1 value: 72.61760432018744 - type: recall_at_3 value: 89.06736052932686 - type: recall_at_5 value: 93.09634203522849 - type: recall_at_10 value: 96.35128012894234 - type: recall_at_100 value: 99.7740237858541 - type: recall_at_1000 value: 99.99690476190477 - task: type: Retrieval dataset: type: mteb/webis-touche2020 name: MTEB Touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: main_score value: 30.2563523019649 - type: ndcg_at_1 value: 37.755102040816325 - type: ndcg_at_3 value: 34.45349994459905 - type: ndcg_at_5 value: 32.508805919063086 - type: ndcg_at_10 value: 30.2563523019649 - type: ndcg_at_100 value: 40.538336664503746 - type: ndcg_at_1000 value: 52.2066951614923 - type: map_at_1 value: 2.75537988273998 - type: map_at_3 value: 6.011397290504469 - type: map_at_5 value: 8.666495836494098 - type: map_at_10 value: 12.17701515007822 - type: map_at_100 value: 18.789086471205852 - type: map_at_1000 value: 20.42972375502502 - type: precision_at_1 value: 40.816326530612244 - type: precision_at_3 value: 35.37414965986394 - type: precision_at_5 value: 32.244897959183675 - type: precision_at_10 value: 26.93877551020408 - type: precision_at_100 value: 8.163265306122451 - type: precision_at_1000 value: 1.5979591836734703 - type: recall_at_1 value: 2.75537988273998 - type: recall_at_3 value: 7.254270324385098 - type: recall_at_5 value: 11.580137100328589 - type: recall_at_10 value: 18.745232816450553 - type: recall_at_100 value: 50.196809658622755 - type: recall_at_1000 value: 85.87317364148332 - task: type: Retrieval dataset: type: mteb/dbpedia name: MTEB DBPedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: main_score value: 51.36940792375597 - type: ndcg_at_1 value: 65.125 - type: ndcg_at_3 value: 55.3967569049025 - type: ndcg_at_5 value: 53.09668587926677 - type: ndcg_at_10 value: 51.36940792375597 - type: ndcg_at_100 value: 56.69623269243084 - type: ndcg_at_1000 value: 63.481061270842 - type: map_at_1 value: 10.265595545755545 - type: map_at_3 value: 16.776544233350698 - type: map_at_5 value: 20.184523605272798 - type: map_at_10 value: 24.772797659849264 - type: map_at_100 value: 36.72689012514183 - type: map_at_1000 value: 38.73869985105569 - type: precision_at_1 
value: 77.5 - type: precision_at_3 value: 59.75000000000003 - type: precision_at_5 value: 52.849999999999994 - type: precision_at_10 value: 42.47499999999995 - type: precision_at_100 value: 13.614999999999993 - type: precision_at_1000 value: 2.500749999999998 - type: recall_at_1 value: 10.265595545755545 - type: recall_at_3 value: 17.819804963534246 - type: recall_at_5 value: 22.46124219601634 - type: recall_at_10 value: 30.44583516613163 - type: recall_at_100 value: 63.84118006287797 - type: recall_at_1000 value: 85.06450356093833 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: main_score value: 47.93921415959017 - type: ndcg_at_1 value: 36.526219490536015 - type: ndcg_at_3 value: 42.35099043224295 - type: ndcg_at_5 value: 44.989685312964156 - type: ndcg_at_10 value: 47.93921415959017 - type: ndcg_at_100 value: 53.05390282389675 - type: ndcg_at_1000 value: 54.776052731794266 - type: map_at_1 value: 30.818605279548184 - type: map_at_3 value: 38.363350019087974 - type: map_at_5 value: 40.295203936887226 - type: map_at_10 value: 41.81978941662592 - type: map_at_100 value: 43.13300727554278 - type: map_at_1000 value: 43.2351061120207 - type: precision_at_1 value: 36.526219490536015 - type: precision_at_3 value: 19.550515857206346 - type: precision_at_5 value: 13.958783060831967 - type: precision_at_10 value: 8.498592395773393 - type: precision_at_100 value: 1.3024888941713948 - type: precision_at_1000 value: 0.1630253057414617 - type: recall_at_1 value: 30.818605279548184 - type: recall_at_3 value: 45.9132085981904 - type: recall_at_5 value: 52.6851323959227 - type: recall_at_10 value: 61.39718618970463 - type: recall_at_100 value: 83.30757187969981 - type: recall_at_1000 value: 94.9192024147964 - dataset: config: en name: MTEB AmazonCounterfactualClassification (en) revision: e8379541af4e31359cca9fbcf4b00f2671dba205 split: test type: mteb/amazon_counterfactual metrics: - type: accuracy value: 89.47761194029852 - type: accuracy_stderr value: 1.6502495811564162 - type: ap value: 62.20813715457866 - type: ap_stderr value: 3.7902166647587854 - type: f1 value: 84.91493292274734 - type: f1_stderr value: 1.9572239640276208 - type: main_score value: 89.47761194029852 task: type: Classification - dataset: config: default name: MTEB AmazonPolarityClassification revision: e2d317d38cd51312af73b3d32a06d1a08b442046 split: test type: mteb/amazon_polarity metrics: - type: accuracy value: 96.89569999999999 - type: accuracy_stderr value: 0.6886368582206464 - type: ap value: 95.38531339207739 - type: ap_stderr value: 0.9009257949898158 - type: f1 value: 96.8941935264779 - type: f1_stderr value: 0.6908609132985931 - type: main_score value: 96.89569999999999 task: type: Classification - dataset: config: en name: MTEB AmazonReviewsClassification (en) revision: 1399c76144fd37290681b995c656ef9b2e06e26d split: test type: mteb/amazon_reviews_multi metrics: - type: accuracy value: 61.602000000000004 - type: accuracy_stderr value: 1.4532019818318436 - type: f1 value: 60.96100449021481 - type: f1_stderr value: 1.8031398419765765 - type: main_score value: 61.602000000000004 task: type: Classification - dataset: config: default name: MTEB ArxivClusteringP2P revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d split: test type: mteb/arxiv-clustering-p2p metrics: - type: main_score value: 54.906319409992 - type: v_measure value: 54.906319409992 - type: v_measure_std value: 14.382682652951683 
task: type: Clustering - dataset: config: default name: MTEB ArxivClusteringS2S revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 split: test type: mteb/arxiv-clustering-s2s metrics: - type: main_score value: 50.27779516565727 - type: v_measure value: 50.27779516565727 - type: v_measure_std value: 14.463711418590636 task: type: Clustering - dataset: config: default name: MTEB AskUbuntuDupQuestions revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 split: test type: mteb/askubuntudupquestions-reranking metrics: - type: map value: 64.59457317979604 - type: mrr value: 78.05214791364376 - type: main_score value: 64.59457317979604 task: type: Reranking - dataset: config: default name: MTEB BIOSSES revision: d3fb88f8f02e40887cd149695127462bbcf29b4a split: test type: mteb/biosses-sts metrics: - type: cosine_pearson value: 86.5833945335644 - type: cosine_spearman value: 85.74472483606 - type: manhattan_pearson value: 85.07748703871708 - type: manhattan_spearman value: 85.1459160110718 - type: euclidean_pearson value: 85.14704290043478 - type: euclidean_spearman value: 85.10073425868336 - type: main_score value: 85.74472483606 task: type: STS - dataset: config: default name: MTEB Banking77Classification revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 split: test type: mteb/banking77 metrics: - type: accuracy value: 92.53246753246755 - type: accuracy_stderr value: 0.5488837781559508 - type: f1 value: 92.5143182074032 - type: f1_stderr value: 0.5657577980223147 - type: main_score value: 92.53246753246755 task: type: Classification - dataset: config: default name: MTEB BiorxivClusteringP2P revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 split: test type: mteb/biorxiv-clustering-p2p metrics: - type: main_score value: 52.64099497480452 - type: v_measure value: 52.64099497480452 - type: v_measure_std value: 1.081892399559334 task: type: Clustering - dataset: config: default name: MTEB BiorxivClusteringS2S revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 split: test type: mteb/biorxiv-clustering-s2s metrics: - type: main_score value: 49.1972734308178 - type: v_measure value: 49.1972734308178 - type: v_measure_std value: 0.9081245477708283 task: type: Clustering - dataset: config: default name: MTEB EmotionClassification revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 split: test type: mteb/emotion metrics: - type: accuracy value: 92.975 - type: accuracy_stderr value: 0.5287958017987677 - type: f1 value: 89.29755895896542 - type: f1_stderr value: 0.6485027046025079 - type: main_score value: 92.975 task: type: Classification - dataset: config: default name: MTEB ImdbClassification revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 split: test type: mteb/imdb metrics: - type: accuracy value: 96.66480000000001 - type: accuracy_stderr value: 0.45673204398202666 - type: ap value: 95.33843919456118 - type: ap_stderr value: 0.6449846039754393 - type: f1 value: 96.6637668164617 - type: f1_stderr value: 0.45793673051468287 - type: main_score value: 96.66480000000001 task: type: Classification - dataset: config: en name: MTEB MTOPDomainClassification (en) revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf split: test type: mteb/mtop_domain metrics: - type: accuracy value: 98.61149110807114 - type: accuracy_stderr value: 0.469748178253266 - type: f1 value: 98.4685511007568 - type: f1_stderr value: 0.51636776728259 - type: main_score value: 98.61149110807114 task: type: Classification - dataset: config: en name: MTEB MTOPIntentClassification (en) revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba split: 
test type: mteb/mtop_intent metrics: - type: accuracy value: 95.51299589603283 - type: accuracy_stderr value: 0.3591676911539482 - type: f1 value: 85.2464691439773 - type: f1_stderr value: 0.9234502856695337 - type: main_score value: 95.51299589603283 task: type: Classification - dataset: config: en name: MTEB MassiveIntentClassification (en) revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 split: test type: mteb/amazon_massive_intent metrics: - type: accuracy value: 82.04774714189644 - type: accuracy_stderr value: 0.7288818520309376 - type: f1 value: 79.28060657840692 - type: f1_stderr value: 0.6872008571781982 - type: main_score value: 82.04774714189644 task: type: Classification - dataset: config: en name: MTEB MassiveScenarioClassification (en) revision: 7d571f92784cd94a019292a1f45445077d0ef634 split: test type: mteb/amazon_massive_scenario metrics: - type: accuracy value: 84.40147948890383 - type: accuracy_stderr value: 1.2939587629143627 - type: f1 value: 83.97779287582267 - type: f1_stderr value: 0.9970599222060901 - type: main_score value: 84.40147948890383 task: type: Classification - dataset: config: default name: MTEB MedrxivClusteringP2P revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 split: test type: mteb/medrxiv-clustering-p2p metrics: - type: main_score value: 45.80879120838561 - type: v_measure value: 45.80879120838561 - type: v_measure_std value: 1.257800489264564 task: type: Clustering - dataset: config: default name: MTEB MedrxivClusteringS2S revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 split: test type: mteb/medrxiv-clustering-s2s metrics: - type: main_score value: 44.106849261042505 - type: v_measure value: 44.106849261042505 - type: v_measure_std value: 1.4347344477874981 task: type: Clustering - dataset: config: default name: MTEB MindSmallReranking revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 split: test type: mteb/mind_small metrics: - type: map value: 31.794062752995345 - type: mrr value: 32.98581714772614 - type: main_score value: 31.794062752995345 task: type: Reranking - dataset: config: default name: MTEB RedditClustering revision: 24640382cdbf8abc73003fb0fa6d111a705499eb split: test type: mteb/reddit-clustering metrics: - type: main_score value: 56.03342473834434 - type: v_measure value: 56.03342473834434 - type: v_measure_std value: 5.972192613803461 task: type: Clustering - dataset: config: default name: MTEB RedditClusteringP2P revision: 282350215ef01743dc01b456c7f5241fa8937f16 split: test type: mteb/reddit-clustering-p2p metrics: - type: main_score value: 65.83156688381274 - type: v_measure value: 65.83156688381274 - type: v_measure_std value: 14.180225112120162 task: type: Clustering - dataset: config: default name: MTEB SICK-R revision: a6ea5a8cab320b040a23452cc28066d9beae2cee split: test type: mteb/sickr-sts metrics: - type: cosine_pearson value: 84.15759544348467 - type: cosine_spearman value: 82.66085892322664 - type: manhattan_pearson value: 82.27257241990692 - type: manhattan_spearman value: 82.57752467555896 - type: euclidean_pearson value: 82.20795646456065 - type: euclidean_spearman value: 82.51008729416401 - type: main_score value: 82.66085892322664 task: type: STS - dataset: config: default name: MTEB STS12 revision: a0d554a64d88156834ff5ae9920b964011b16384 split: test type: mteb/sts12-sts metrics: - type: cosine_pearson value: 84.3406321391237 - type: cosine_spearman value: 77.71091257651071 - type: manhattan_pearson value: 81.25784268400994 - type: manhattan_spearman value: 77.98426383345507 - type: euclidean_pearson value: 
81.25641851462917 - type: euclidean_spearman value: 77.93254971878063 - type: main_score value: 77.71091257651071 task: type: STS - dataset: config: default name: MTEB STS13 revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca split: test type: mteb/sts13-sts metrics: - type: cosine_pearson value: 86.1528398894769 - type: cosine_spearman value: 87.44662352358895 - type: manhattan_pearson value: 86.92164570802663 - type: manhattan_spearman value: 86.9132692625668 - type: euclidean_pearson value: 87.00156426580821 - type: euclidean_spearman value: 86.98750068631274 - type: main_score value: 87.44662352358895 task: type: STS - dataset: config: default name: MTEB STS14 revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 split: test type: mteb/sts14-sts metrics: - type: cosine_pearson value: 83.32782491176253 - type: cosine_spearman value: 83.48313793311584 - type: manhattan_pearson value: 82.60528063429948 - type: manhattan_spearman value: 83.10434862310481 - type: euclidean_pearson value: 82.68016090104034 - type: euclidean_spearman value: 83.14418662406631 - type: main_score value: 83.48313793311584 task: type: STS - dataset: config: default name: MTEB STS15 revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 split: test type: mteb/sts15-sts metrics: - type: cosine_pearson value: 86.31535441436343 - type: cosine_spearman value: 87.63145141246594 - type: manhattan_pearson value: 86.95972711389149 - type: manhattan_spearman value: 86.9849824463052 - type: euclidean_pearson value: 86.95391575487379 - type: euclidean_spearman value: 86.97613682266213 - type: main_score value: 87.63145141246594 task: type: STS - dataset: config: default name: MTEB STS16 revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 split: test type: mteb/sts16-sts metrics: - type: cosine_pearson value: 83.43854397443079 - type: cosine_spearman value: 86.70176531845136 - type: manhattan_pearson value: 85.82302317064868 - type: manhattan_spearman value: 86.36561734213241 - type: euclidean_pearson value: 85.80127366135169 - type: euclidean_spearman value: 86.34803859754834 - type: main_score value: 86.70176531845136 task: type: STS - dataset: config: en-en name: MTEB STS17 (en-en) revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d split: test type: mteb/sts17-crosslingual-sts metrics: - type: cosine_pearson value: 90.38940955877999 - type: cosine_spearman value: 91.18282119920893 - type: manhattan_pearson value: 91.31823663739615 - type: manhattan_spearman value: 90.67257321731341 - type: euclidean_pearson value: 91.30318753138528 - type: euclidean_spearman value: 90.69044765693836 - type: main_score value: 91.18282119920893 task: type: STS - dataset: config: en name: MTEB STS22 (en) revision: eea2b4fe26a775864c896887d910b76a8098ad3f split: test type: mteb/sts22-crosslingual-sts metrics: - type: cosine_pearson value: 69.33936467780947 - type: cosine_spearman value: 69.02345807358802 - type: manhattan_pearson value: 70.11799452953082 - type: manhattan_spearman value: 68.55450923481405 - type: euclidean_pearson value: 70.10857680491809 - type: euclidean_spearman value: 68.44610245708984 - type: main_score value: 69.02345807358802 task: type: STS - dataset: config: default name: MTEB STSBenchmark revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 split: test type: mteb/stsbenchmark-sts metrics: - type: cosine_pearson value: 85.97288135509513 - type: cosine_spearman value: 87.25208310840168 - type: manhattan_pearson value: 86.3786471501451 - type: manhattan_spearman value: 86.71177136523868 - type: euclidean_pearson value: 
86.40522339296625 - type: euclidean_spearman value: 86.73930576508816 - type: main_score value: 87.25208310840168 task: type: STS - dataset: config: default name: MTEB SciDocsRR revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab split: test type: mteb/scidocs-reranking metrics: - type: map value: 87.60324164489178 - type: mrr value: 96.30331904841708 - type: main_score value: 87.60324164489178 task: type: Reranking - dataset: config: default name: MTEB SprintDuplicateQuestions revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 split: test type: mteb/sprintduplicatequestions-pairclassification metrics: - type: cos_sim_accuracy value: 99.6920792079208 - type: cos_sim_accuracy_threshold value: 90.36337347155474 - type: cos_sim_ap value: 90.93952679056765 - type: cos_sim_f1 value: 83.10700706137968 - type: cos_sim_f1_threshold value: 90.36337347155474 - type: cos_sim_precision value: 90.96313912009512 - type: cos_sim_recall value: 76.5 - type: dot_accuracy value: 99.54554455445545 - type: dot_accuracy_threshold value: 2876800.0 - type: dot_ap value: 84.01112287735286 - type: dot_f1 value: 75.7622739018088 - type: dot_f1_threshold value: 2820800.0 - type: dot_precision value: 78.39572192513369 - type: dot_recall value: 73.3 - type: euclidean_accuracy value: 99.6930693069307 - type: euclidean_accuracy_threshold value: 7718.054017089397 - type: euclidean_ap value: 91.1257568881301 - type: euclidean_f1 value: 83.09022150189087 - type: euclidean_f1_threshold value: 7817.08324628535 - type: euclidean_precision value: 90.36427732079906 - type: euclidean_recall value: 76.9 - type: manhattan_accuracy value: 99.6920792079208 - type: manhattan_accuracy_threshold value: 364735.19654273987 - type: manhattan_ap value: 91.2326885940691 - type: manhattan_f1 value: 83.36008560727663 - type: manhattan_f1_threshold value: 375395.8945572376 - type: manhattan_precision value: 89.64326812428078 - type: manhattan_recall value: 77.9 - type: max_accuracy value: 99.6930693069307 - type: max_ap value: 91.2326885940691 - type: max_f1 value: 83.36008560727663 task: type: PairClassification - dataset: config: default name: MTEB StackExchangeClustering revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 split: test type: mteb/stackexchange-clustering metrics: - type: main_score value: 66.2095300942637 - type: v_measure value: 66.2095300942637 - type: v_measure_std value: 3.214369679617631 task: type: Clustering - dataset: config: default name: MTEB StackExchangeClusteringP2P revision: 815ca46b2622cec33ccafc3735d572c266efdb44 split: test type: mteb/stackexchange-clustering-p2p metrics: - type: main_score value: 45.74307000935057 - type: v_measure value: 45.74307000935057 - type: v_measure_std value: 1.5352466748569888 task: type: Clustering - dataset: config: default name: MTEB StackOverflowDupQuestions revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 split: test type: mteb/stackoverflowdupquestions-reranking metrics: - type: map value: 54.90337951829123 - type: mrr value: 56.12889663441134 - type: main_score value: 54.90337951829123 task: type: Reranking - dataset: config: default name: MTEB SummEval revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c split: test type: mteb/summeval metrics: - type: cosine_pearson value: 31.0669308484832 - type: cosine_spearman value: 31.19637421540861 - type: dot_pearson value: 30.62326176666765 - type: dot_spearman value: 30.42135737502967 - type: main_score value: 31.19637421540861 task: type: Summarization - dataset: config: default name: MTEB ToxicConversationsClassification revision: 
d7c0de2777da35d6aae2200a62c6e0e5af397c4c split: test type: mteb/toxic_conversations_50k metrics: - type: accuracy value: 87.34339999999999 - type: accuracy_stderr value: 1.838245696309393 - type: ap value: 33.536584790435406 - type: ap_stderr value: 2.276373512492581 - type: f1 value: 72.47307082324448 - type: f1_stderr value: 1.9964640292072542 - type: main_score value: 87.34339999999999 task: type: Classification - dataset: config: default name: MTEB TweetSentimentExtractionClassification revision: d604517c81ca91fe16a244d1248fc021f9ecee7a split: test type: mteb/tweet_sentiment_extraction metrics: - type: accuracy value: 78.86247877758915 - type: accuracy_stderr value: 1.1273253738982443 - type: f1 value: 79.14666244848874 - type: f1_stderr value: 1.1532640958036497 - type: main_score value: 78.86247877758915 task: type: Classification - dataset: config: default name: MTEB TwentyNewsgroupsClustering revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 split: test type: mteb/twentynewsgroups-clustering metrics: - type: main_score value: 70.44270836680788 - type: v_measure value: 70.44270836680788 - type: v_measure_std value: 1.5185423698266132 task: type: Clustering - dataset: config: default name: MTEB TwitterSemEval2015 revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 split: test type: mteb/twittersemeval2015-pairclassification metrics: - type: cos_sim_accuracy value: 87.74512725755498 - type: cos_sim_accuracy_threshold value: 82.34941560483547 - type: cos_sim_ap value: 79.6389274210382 - type: cos_sim_f1 value: 71.76319176319176 - type: cos_sim_f1_threshold value: 80.1523829249257 - type: cos_sim_precision value: 70.0502512562814 - type: cos_sim_recall value: 73.56200527704485 - type: dot_accuracy value: 85.13441020444657 - type: dot_accuracy_threshold value: 2220800.0 - type: dot_ap value: 71.67080150823449 - type: dot_f1 value: 66.18984119287187 - type: dot_f1_threshold value: 2086400.0 - type: dot_precision value: 61.224489795918366 - type: dot_recall value: 72.0316622691293 - type: euclidean_accuracy value: 87.69148238660071 - type: euclidean_accuracy_threshold value: 9221.50036619459 - type: euclidean_ap value: 79.65326151280289 - type: euclidean_f1 value: 71.7903489983621 - type: euclidean_f1_threshold value: 10313.528386219872 - type: euclidean_precision value: 68.70026525198939 - type: euclidean_recall value: 75.17150395778364 - type: manhattan_accuracy value: 87.74512725755498 - type: manhattan_accuracy_threshold value: 444289.1119837761 - type: manhattan_ap value: 79.67744645365104 - type: manhattan_f1 value: 71.94423699278066 - type: manhattan_f1_threshold value: 491676.24004781246 - type: manhattan_precision value: 68.0961357210179 - type: manhattan_recall value: 76.2532981530343 - type: max_accuracy value: 87.74512725755498 - type: max_ap value: 79.67744645365104 - type: max_f1 value: 71.94423699278066 task: type: PairClassification - dataset: config: default name: MTEB TwitterURLCorpus revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf split: test type: mteb/twitterurlcorpus-pairclassification metrics: - type: cos_sim_accuracy value: 89.5544688943222 - type: cos_sim_accuracy_threshold value: 81.58909533293946 - type: cos_sim_ap value: 86.95174990178396 - type: cos_sim_f1 value: 79.1543756145526 - type: cos_sim_f1_threshold value: 80.08573448087095 - type: cos_sim_precision value: 77.78355879292404 - type: cos_sim_recall value: 80.5743763473976 - type: dot_accuracy value: 88.60752124810804 - type: dot_accuracy_threshold value: 2136000.0 - type: dot_ap value: 
84.26724775947629 - type: dot_f1 value: 77.67666146985243 - type: dot_f1_threshold value: 2064000.0 - type: dot_precision value: 73.40505721921468 - type: dot_recall value: 82.47613181398214 - type: euclidean_accuracy value: 89.5370046959289 - type: euclidean_accuracy_threshold value: 9750.113991666478 - type: euclidean_ap value: 86.99393092403776 - type: euclidean_f1 value: 79.07167337207571 - type: euclidean_f1_threshold value: 10338.095928500366 - type: euclidean_precision value: 76.59497690531177 - type: euclidean_recall value: 81.71388974437943 - type: manhattan_accuracy value: 89.57581402569178 - type: manhattan_accuracy_threshold value: 463812.92815208435 - type: manhattan_ap value: 87.00849868076658 - type: manhattan_f1 value: 79.08583576933297 - type: manhattan_f1_threshold value: 482453.35128605366 - type: manhattan_precision value: 78.00494270950348 - type: manhattan_recall value: 80.19710502001848 - type: max_accuracy value: 89.57581402569178 - type: max_ap value: 87.00849868076658 - type: max_f1 value: 79.1543756145526 task: type: PairClassification - dataset: config: default name: MTEB AFQMC revision: b44c3b011063adb25877c13823db83bb193913c4 split: validation type: C-MTEB/AFQMC metrics: - type: cosine_pearson value: 45.108559635369325 - type: cosine_spearman value: 47.172833128216176 - type: manhattan_pearson value: 45.75443077564791 - type: manhattan_spearman value: 47.13974146235398 - type: euclidean_pearson value: 45.78921257223492 - type: euclidean_spearman value: 47.177095238278625 - type: main_score value: 47.172833128216176 task: type: STS - dataset: config: default name: MTEB ATEC revision: 0f319b1142f28d00e055a6770f3f726ae9b7d865 split: test type: C-MTEB/ATEC metrics: - type: cosine_pearson value: 48.304409578388466 - type: cosine_spearman value: 50.75006977697012 - type: manhattan_pearson value: 52.688818756177035 - type: manhattan_spearman value: 50.739214155741095 - type: euclidean_pearson value: 52.71788557204978 - type: euclidean_spearman value: 50.77895730336448 - type: main_score value: 50.75006977697012 task: type: STS - dataset: config: zh name: MTEB AmazonReviewsClassification (zh) revision: 1399c76144fd37290681b995c656ef9b2e06e26d split: test type: mteb/amazon_reviews_multi metrics: - type: accuracy value: 54.339999999999996 - type: accuracy_stderr value: 1.6518837731511269 - type: f1 value: 53.37316538790502 - type: f1_stderr value: 1.6112926272861336 - type: main_score value: 54.339999999999996 task: type: Classification - dataset: config: default name: MTEB BQ revision: e3dda5e115e487b39ec7e618c0c6a29137052a55 split: test type: C-MTEB/BQ metrics: - type: cosine_pearson value: 59.62831218167518 - type: cosine_spearman value: 62.02213472473759 - type: manhattan_pearson value: 61.122261197018176 - type: manhattan_spearman value: 62.208780520694454 - type: euclidean_pearson value: 61.17827629627213 - type: euclidean_spearman value: 62.266859648664244 - type: main_score value: 62.02213472473759 task: type: STS - dataset: config: default name: MTEB CLSClusteringP2P revision: 4b6227591c6c1a73bc76b1055f3b7f3588e72476 split: test type: C-MTEB/CLSClusteringP2P metrics: - type: main_score value: 54.64518394835408 - type: v_measure value: 54.64518394835408 - type: v_measure_std value: 1.2745946640208072 task: type: Clustering - dataset: config: default name: MTEB CLSClusteringS2S revision: e458b3f5414b62b7f9f83499ac1f5497ae2e869f split: test type: C-MTEB/CLSClusteringS2S metrics: - type: main_score value: 63.68323477729556 - type: v_measure value: 63.68323477729556 
- type: v_measure_std value: 1.740918833098302 task: type: Clustering - dataset: config: default name: MTEB CMedQAv1 revision: 8d7f1e942507dac42dc58017c1a001c3717da7df split: test type: C-MTEB/CMedQAv1-reranking metrics: - type: map value: 84.61500884703916 - type: mrr value: 87.01424603174604 - type: main_score value: 84.61500884703916 task: type: Reranking - dataset: config: default name: MTEB CMedQAv2 revision: 23d186750531a14a0357ca22cd92d712fd512ea0 split: test type: C-MTEB/CMedQAv2-reranking metrics: - type: map value: 85.60137988993483 - type: mrr value: 87.96857142857142 - type: main_score value: 85.60137988993483 task: type: Reranking - dataset: config: default name: MTEB CmedqaRetrieval revision: cd540c506dae1cf9e9a59c3e06f42030d54e7301 split: dev type: C-MTEB/CmedqaRetrieval metrics: - type: map_at_1 value: 24.191 - type: map_at_10 value: 35.819 - type: map_at_100 value: 37.639 - type: map_at_1000 value: 37.775 - type: map_at_3 value: 32.045 - type: map_at_5 value: 34.008 - type: mrr_at_1 value: 36.684 - type: mrr_at_10 value: 44.769 - type: mrr_at_100 value: 45.754 - type: mrr_at_1000 value: 45.809 - type: mrr_at_3 value: 42.465 - type: mrr_at_5 value: 43.696 - type: ndcg_at_1 value: 36.834 - type: ndcg_at_10 value: 42.208 - type: ndcg_at_100 value: 49.507 - type: ndcg_at_1000 value: 51.834 - type: ndcg_at_3 value: 37.416 - type: ndcg_at_5 value: 39.152 - type: precision_at_1 value: 36.834 - type: precision_at_10 value: 9.357 - type: precision_at_100 value: 1.5310000000000001 - type: precision_at_1000 value: 0.183 - type: precision_at_3 value: 21.08 - type: precision_at_5 value: 15.068999999999999 - type: recall_at_1 value: 24.191 - type: recall_at_10 value: 52.078 - type: recall_at_100 value: 82.548 - type: recall_at_1000 value: 98.017 - type: recall_at_3 value: 37.484 - type: recall_at_5 value: 43.187 - type: main_score value: 42.208 task: type: Retrieval - dataset: config: default name: MTEB Cmnli revision: 41bc36f332156f7adc9e38f53777c959b2ae9766 split: validation type: C-MTEB/CMNLI metrics: - type: cos_sim_accuracy value: 81.98436560432953 - type: cos_sim_accuracy_threshold value: 67.33228049687503 - type: cos_sim_ap value: 90.13312662430796 - type: cos_sim_f1 value: 83.2163938077737 - type: cos_sim_f1_threshold value: 64.44945196171463 - type: cos_sim_precision value: 79.45555082943429 - type: cos_sim_recall value: 87.350946925415 - type: dot_accuracy value: 80.50511124473843 - type: dot_accuracy_threshold value: 1736000.0 - type: dot_ap value: 88.76136186445322 - type: dot_f1 value: 81.75838631878973 - type: dot_f1_threshold value: 1681600.0 - type: dot_precision value: 76.96594427244582 - type: dot_recall value: 87.18728080430208 - type: euclidean_accuracy value: 82.21286831028262 - type: euclidean_accuracy_threshold value: 13240.938473272565 - type: euclidean_ap value: 90.14863232280865 - type: euclidean_f1 value: 83.277292086976 - type: euclidean_f1_threshold value: 13667.852165734186 - type: euclidean_precision value: 79.97847147470398 - type: euclidean_recall value: 86.85994856207621 - type: manhattan_accuracy value: 82.21286831028262 - type: manhattan_accuracy_threshold value: 629412.1389746666 - type: manhattan_ap value: 90.03868533208357 - type: manhattan_f1 value: 83.15683870248579 - type: manhattan_f1_threshold value: 649621.3114321232 - type: manhattan_precision value: 79.46314443971026 - type: manhattan_recall value: 87.21066167874679 - type: max_accuracy value: 82.21286831028262 - type: max_ap value: 90.14863232280865 - type: max_f1 value: 83.277292086976 
task: type: PairClassification - dataset: config: default name: MTEB CovidRetrieval revision: 1271c7809071a13532e05f25fb53511ffce77117 split: dev type: C-MTEB/CovidRetrieval metrics: - type: map_at_1 value: 65.595 - type: map_at_10 value: 73.717 - type: map_at_100 value: 74.134 - type: map_at_1000 value: 74.143 - type: map_at_3 value: 71.97 - type: map_at_5 value: 73.11800000000001 - type: mrr_at_1 value: 65.648 - type: mrr_at_10 value: 73.618 - type: mrr_at_100 value: 74.02499999999999 - type: mrr_at_1000 value: 74.033 - type: mrr_at_3 value: 71.865 - type: mrr_at_5 value: 73.04 - type: ndcg_at_1 value: 65.753 - type: ndcg_at_10 value: 77.458 - type: ndcg_at_100 value: 79.46 - type: ndcg_at_1000 value: 79.666 - type: ndcg_at_3 value: 73.988 - type: ndcg_at_5 value: 76.038 - type: precision_at_1 value: 65.753 - type: precision_at_10 value: 8.999 - type: precision_at_100 value: 0.9939999999999999 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 26.765 - type: precision_at_5 value: 17.092 - type: recall_at_1 value: 65.595 - type: recall_at_10 value: 89.041 - type: recall_at_100 value: 98.31400000000001 - type: recall_at_1000 value: 99.895 - type: recall_at_3 value: 79.768 - type: recall_at_5 value: 84.66799999999999 - type: main_score value: 77.458 task: type: Retrieval - dataset: config: default name: MTEB DuRetrieval revision: a1a333e290fe30b10f3f56498e3a0d911a693ced split: dev type: C-MTEB/DuRetrieval metrics: - type: map_at_1 value: 27.248 - type: map_at_10 value: 84.303 - type: map_at_100 value: 86.866 - type: map_at_1000 value: 86.888 - type: map_at_3 value: 58.658 - type: map_at_5 value: 74.265 - type: mrr_at_1 value: 92.2 - type: mrr_at_10 value: 94.733 - type: mrr_at_100 value: 94.767 - type: mrr_at_1000 value: 94.768 - type: mrr_at_3 value: 94.492 - type: mrr_at_5 value: 94.627 - type: ndcg_at_1 value: 92.2 - type: ndcg_at_10 value: 90.462 - type: ndcg_at_100 value: 92.562 - type: ndcg_at_1000 value: 92.757 - type: ndcg_at_3 value: 89.44800000000001 - type: ndcg_at_5 value: 88.683 - type: precision_at_1 value: 92.2 - type: precision_at_10 value: 42.980000000000004 - type: precision_at_100 value: 4.851 - type: precision_at_1000 value: 0.49 - type: precision_at_3 value: 80.233 - type: precision_at_5 value: 67.95 - type: recall_at_1 value: 27.248 - type: recall_at_10 value: 91.46600000000001 - type: recall_at_100 value: 98.566 - type: recall_at_1000 value: 99.557 - type: recall_at_3 value: 60.671 - type: recall_at_5 value: 78.363 - type: main_score value: 90.462 task: type: Retrieval - dataset: config: default name: MTEB EcomRetrieval revision: 687de13dc7294d6fd9be10c6945f9e8fec8166b9 split: dev type: C-MTEB/EcomRetrieval metrics: - type: map_at_1 value: 54.7 - type: map_at_10 value: 64.574 - type: map_at_100 value: 65.144 - type: map_at_1000 value: 65.156 - type: map_at_3 value: 62.333000000000006 - type: map_at_5 value: 63.63799999999999 - type: mrr_at_1 value: 54.7 - type: mrr_at_10 value: 64.603 - type: mrr_at_100 value: 65.172 - type: mrr_at_1000 value: 65.184 - type: mrr_at_3 value: 62.383 - type: mrr_at_5 value: 63.683 - type: ndcg_at_1 value: 54.7 - type: ndcg_at_10 value: 69.298 - type: ndcg_at_100 value: 71.81 - type: ndcg_at_1000 value: 72.117 - type: ndcg_at_3 value: 64.72099999999999 - type: ndcg_at_5 value: 67.071 - type: precision_at_1 value: 54.7 - type: precision_at_10 value: 8.41 - type: precision_at_100 value: 0.9530000000000001 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 23.867 - type: precision_at_5 value: 
15.459999999999999 - type: recall_at_1 value: 54.7 - type: recall_at_10 value: 84.1 - type: recall_at_100 value: 95.3 - type: recall_at_1000 value: 97.7 - type: recall_at_3 value: 71.6 - type: recall_at_5 value: 77.3 - type: main_score value: 69.298 task: type: Retrieval - dataset: config: default name: MTEB IFlyTek revision: 421605374b29664c5fc098418fe20ada9bd55f8a split: validation type: C-MTEB/IFlyTek-classification metrics: - type: accuracy value: 49.942285494420936 - type: accuracy_stderr value: 0.9218275144833329 - type: f1 value: 41.32381790374152 - type: f1_stderr value: 0.8291507105327707 - type: main_score value: 49.942285494420936 task: type: Classification - dataset: config: default name: MTEB JDReview revision: b7c64bd89eb87f8ded463478346f76731f07bf8b split: test type: C-MTEB/JDReview-classification metrics: - type: accuracy value: 88.91181988742964 - type: accuracy_stderr value: 1.952391767940518 - type: ap value: 60.18509628974178 - type: ap_stderr value: 4.273060966573582 - type: f1 value: 84.02722221827027 - type: f1_stderr value: 2.238197243395083 - type: main_score value: 88.91181988742964 task: type: Classification - dataset: config: default name: MTEB LCQMC revision: 17f9b096f80380fce5ed12a9be8be7784b337daf split: test type: C-MTEB/LCQMC metrics: - type: cosine_pearson value: 68.32691294171383 - type: cosine_spearman value: 75.95458618586729 - type: manhattan_pearson value: 74.37198807732018 - type: manhattan_spearman value: 75.99352157963375 - type: euclidean_pearson value: 74.36294627886716 - type: euclidean_spearman value: 75.98632511635132 - type: main_score value: 75.95458618586729 task: type: STS - dataset: config: default name: MTEB MMarcoReranking revision: 8e0c766dbe9e16e1d221116a3f36795fbade07f6 split: dev type: C-MTEB/Mmarco-reranking metrics: - type: map value: 35.4327533126161 - type: mrr value: 34.61507936507937 - type: main_score value: 35.4327533126161 task: type: Reranking - dataset: config: default name: MTEB MMarcoRetrieval revision: 539bbde593d947e2a124ba72651aafc09eb33fc2 split: dev type: C-MTEB/MMarcoRetrieval metrics: - type: map_at_1 value: 72.652 - type: map_at_10 value: 81.396 - type: map_at_100 value: 81.597 - type: map_at_1000 value: 81.60300000000001 - type: map_at_3 value: 79.757 - type: map_at_5 value: 80.798 - type: mrr_at_1 value: 75.01400000000001 - type: mrr_at_10 value: 81.842 - type: mrr_at_100 value: 82.025 - type: mrr_at_1000 value: 82.03099999999999 - type: mrr_at_3 value: 80.45400000000001 - type: mrr_at_5 value: 81.345 - type: ndcg_at_1 value: 74.98599999999999 - type: ndcg_at_10 value: 84.70100000000001 - type: ndcg_at_100 value: 85.568 - type: ndcg_at_1000 value: 85.721 - type: ndcg_at_3 value: 81.64099999999999 - type: ndcg_at_5 value: 83.375 - type: precision_at_1 value: 74.98599999999999 - type: precision_at_10 value: 10.049 - type: precision_at_100 value: 1.047 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 30.458000000000002 - type: precision_at_5 value: 19.206 - type: recall_at_1 value: 72.652 - type: recall_at_10 value: 94.40899999999999 - type: recall_at_100 value: 98.241 - type: recall_at_1000 value: 99.42 - type: recall_at_3 value: 86.354 - type: recall_at_5 value: 90.472 - type: main_score value: 84.70100000000001 task: type: Retrieval - dataset: config: zh-CN name: MTEB MassiveIntentClassification (zh-CN) revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 split: test type: mteb/amazon_massive_intent metrics: - type: accuracy value: 78.19098856758575 - type: accuracy_stderr value: 
0.6325028678427684 - type: f1 value: 74.80611425574001 - type: f1_stderr value: 0.9021806207904779 - type: main_score value: 78.19098856758575 task: type: Classification - dataset: config: zh-CN name: MTEB MassiveScenarioClassification (zh-CN) revision: 7d571f92784cd94a019292a1f45445077d0ef634 split: test type: mteb/amazon_massive_scenario metrics: - type: accuracy value: 82.58238063214526 - type: accuracy_stderr value: 1.0999970213165273 - type: f1 value: 81.94734854057064 - type: f1_stderr value: 1.248633855872851 - type: main_score value: 82.58238063214526 task: type: Classification - dataset: config: default name: MTEB MedicalRetrieval revision: 2039188fb5800a9803ba5048df7b76e6fb151fc6 split: dev type: C-MTEB/MedicalRetrieval metrics: - type: map_at_1 value: 53.7 - type: map_at_10 value: 59.184000000000005 - type: map_at_100 value: 59.754 - type: map_at_1000 value: 59.8 - type: map_at_3 value: 57.833 - type: map_at_5 value: 58.548 - type: mrr_at_1 value: 54.0 - type: mrr_at_10 value: 59.352000000000004 - type: mrr_at_100 value: 59.926 - type: mrr_at_1000 value: 59.971 - type: mrr_at_3 value: 57.99999999999999 - type: mrr_at_5 value: 58.714999999999996 - type: ndcg_at_1 value: 53.7 - type: ndcg_at_10 value: 62.022 - type: ndcg_at_100 value: 65.038 - type: ndcg_at_1000 value: 66.366 - type: ndcg_at_3 value: 59.209 - type: ndcg_at_5 value: 60.51299999999999 - type: precision_at_1 value: 53.7 - type: precision_at_10 value: 7.1 - type: precision_at_100 value: 0.856 - type: precision_at_1000 value: 0.096 - type: precision_at_3 value: 21.067 - type: precision_at_5 value: 13.28 - type: recall_at_1 value: 53.7 - type: recall_at_10 value: 71.0 - type: recall_at_100 value: 85.6 - type: recall_at_1000 value: 96.3 - type: recall_at_3 value: 63.2 - type: recall_at_5 value: 66.4 - type: main_score value: 62.022 task: type: Retrieval - dataset: config: default name: MTEB MultilingualSentiment revision: 46958b007a63fdbf239b7672c25d0bea67b5ea1a split: validation type: C-MTEB/MultilingualSentiment-classification metrics: - type: accuracy value: 78.91333333333334 - type: accuracy_stderr value: 1.0834307648494321 - type: f1 value: 78.881433228092 - type: f1_stderr value: 1.122457277013712 - type: main_score value: 78.91333333333334 task: type: Classification - dataset: config: default name: MTEB Ocnli revision: 66e76a618a34d6d565d5538088562851e6daa7ec split: validation type: C-MTEB/OCNLI metrics: - type: cos_sim_accuracy value: 76.39415268002165 - type: cos_sim_accuracy_threshold value: 68.98242139321592 - type: cos_sim_ap value: 83.20687440058073 - type: cos_sim_f1 value: 78.4351145038168 - type: cos_sim_f1_threshold value: 65.47409929698304 - type: cos_sim_precision value: 71.54046997389034 - type: cos_sim_recall value: 86.80042238648363 - type: dot_accuracy value: 74.60747157552788 - type: dot_accuracy_threshold value: 1737600.0 - type: dot_ap value: 79.78938545919723 - type: dot_f1 value: 76.92307692307692 - type: dot_f1_threshold value: 1652800.0 - type: dot_precision value: 67.90622473726758 - type: dot_recall value: 88.70116156283 - type: euclidean_accuracy value: 76.34001082837032 - type: euclidean_accuracy_threshold value: 12597.299662420446 - type: euclidean_ap value: 83.60222701792158 - type: euclidean_f1 value: 78.77947295423024 - type: euclidean_f1_threshold value: 13639.653702639469 - type: euclidean_precision value: 70.06578947368422 - type: euclidean_recall value: 89.96832101372756 - type: manhattan_accuracy value: 76.23172712506768 - type: manhattan_accuracy_threshold value: 
587601.2824743986 - type: manhattan_ap value: 83.51813426548178 - type: manhattan_f1 value: 78.6654135338346 - type: manhattan_f1_threshold value: 639711.1931562424 - type: manhattan_precision value: 70.87214225232854 - type: manhattan_recall value: 88.3843717001056 - type: max_accuracy value: 76.39415268002165 - type: max_ap value: 83.60222701792158 - type: max_f1 value: 78.77947295423024 task: type: PairClassification - dataset: config: default name: MTEB OnlineShopping revision: e610f2ebd179a8fda30ae534c3878750a96db120 split: test type: C-MTEB/OnlineShopping-classification metrics: - type: accuracy value: 94.59 - type: accuracy_stderr value: 0.8971621926942733 - type: ap value: 93.01229797205905 - type: ap_stderr value: 1.0519542956523058 - type: f1 value: 94.58077736915268 - type: f1_stderr value: 0.8954928292768671 - type: main_score value: 94.59 task: type: Classification - dataset: config: default name: MTEB PAWSX revision: 9c6a90e430ac22b5779fb019a23e820b11a8b5e1 split: test type: C-MTEB/PAWSX metrics: - type: cosine_pearson value: 24.341872875292857 - type: cosine_spearman value: 30.570037022875436 - type: manhattan_pearson value: 31.41015320258418 - type: manhattan_spearman value: 30.604526098895114 - type: euclidean_pearson value: 31.400038084432175 - type: euclidean_spearman value: 30.61062265273698 - type: main_score value: 30.570037022875436 task: type: STS - dataset: config: default name: MTEB QBQTC revision: 790b0510dc52b1553e8c49f3d2afb48c0e5c48b7 split: test type: C-MTEB/QBQTC metrics: - type: cosine_pearson value: 36.61757468091905 - type: cosine_spearman value: 38.981417359835504 - type: manhattan_pearson value: 37.971127169578764 - type: manhattan_spearman value: 39.55028286687854 - type: euclidean_pearson value: 37.96983777648438 - type: euclidean_spearman value: 39.542856511171784 - type: main_score value: 38.981417359835504 task: type: STS - dataset: config: zh name: MTEB STS22 (zh) revision: eea2b4fe26a775864c896887d910b76a8098ad3f split: test type: mteb/sts22-crosslingual-sts metrics: - type: cosine_pearson value: 68.29834902017382 - type: cosine_spearman value: 68.6823378297782 - type: manhattan_pearson value: 68.47336169904406 - type: manhattan_spearman value: 69.08033223619941 - type: euclidean_pearson value: 68.38785956191622 - type: euclidean_spearman value: 68.97973814449657 - type: main_score value: 68.6823378297782 task: type: STS - dataset: config: default name: MTEB STSB revision: 0cde68302b3541bb8b3c340dc0644b0b745b3dc0 split: test type: C-MTEB/STSB metrics: - type: cosine_pearson value: 80.60572958563593 - type: cosine_spearman value: 80.87063761195603 - type: manhattan_pearson value: 79.30174059269083 - type: manhattan_spearman value: 80.02203618135883 - type: euclidean_pearson value: 79.3314553444783 - type: euclidean_spearman value: 80.04556415585255 - type: main_score value: 80.87063761195603 task: type: STS - dataset: config: default name: MTEB T2Reranking revision: 76631901a18387f85eaa53e5450019b87ad58ef9 split: dev type: C-MTEB/T2Reranking metrics: - type: map value: 67.47921173708028 - type: mrr value: 77.9396513739777 - type: main_score value: 67.47921173708028 task: type: Reranking - dataset: config: default name: MTEB T2Retrieval revision: 8731a845f1bf500a4f111cf1070785c793d10e64 split: dev type: C-MTEB/T2Retrieval metrics: - type: map_at_1 value: 28.021 - type: map_at_10 value: 79.149 - type: map_at_100 value: 82.613 - type: map_at_1000 value: 82.67099999999999 - type: map_at_3 value: 55.665 - type: map_at_5 value: 68.46900000000001 - type: 
mrr_at_1 value: 91.106 - type: mrr_at_10 value: 93.372 - type: mrr_at_100 value: 93.44200000000001 - type: mrr_at_1000 value: 93.445 - type: mrr_at_3 value: 92.99300000000001 - type: mrr_at_5 value: 93.24900000000001 - type: ndcg_at_1 value: 91.106 - type: ndcg_at_10 value: 86.259 - type: ndcg_at_100 value: 89.46600000000001 - type: ndcg_at_1000 value: 90.012 - type: ndcg_at_3 value: 87.574 - type: ndcg_at_5 value: 86.283 - type: precision_at_1 value: 91.106 - type: precision_at_10 value: 42.742999999999995 - type: precision_at_100 value: 5.029999999999999 - type: precision_at_1000 value: 0.516 - type: precision_at_3 value: 76.593 - type: precision_at_5 value: 64.243 - type: recall_at_1 value: 28.021 - type: recall_at_10 value: 85.184 - type: recall_at_100 value: 95.79299999999999 - type: recall_at_1000 value: 98.547 - type: recall_at_3 value: 57.233000000000004 - type: recall_at_5 value: 71.628 - type: main_score value: 86.259 task: type: Retrieval - dataset: config: default name: MTEB TNews revision: 317f262bf1e6126357bbe89e875451e4b0938fe4 split: validation type: C-MTEB/TNews-classification metrics: - type: accuracy value: 50.255 - type: accuracy_stderr value: 0.9341868121526873 - type: f1 value: 48.65080322457893 - type: f1_stderr value: 0.9391547591179161 - type: main_score value: 50.255 task: type: Classification - dataset: config: default name: MTEB ThuNewsClusteringP2P revision: 5798586b105c0434e4f0fe5e767abe619442cf93 split: test type: C-MTEB/ThuNewsClusteringP2P metrics: - type: main_score value: 64.32076022871308 - type: v_measure value: 64.32076022871308 - type: v_measure_std value: 0.7190996709617924 task: type: Clustering - dataset: config: default name: MTEB ThuNewsClusteringS2S revision: 8a8b2caeda43f39e13c4bc5bea0f8a667896e10d split: test type: C-MTEB/ThuNewsClusteringS2S metrics: - type: main_score value: 54.57080911705562 - type: v_measure value: 54.57080911705562 - type: v_measure_std value: 1.5185826402845883 task: type: Clustering - dataset: config: default name: MTEB VideoRetrieval revision: 58c2597a5943a2ba48f4668c3b90d796283c5639 split: dev type: C-MTEB/VideoRetrieval metrics: - type: map_at_1 value: 63.1 - type: map_at_10 value: 73.137 - type: map_at_100 value: 73.539 - type: map_at_1000 value: 73.546 - type: map_at_3 value: 71.467 - type: map_at_5 value: 72.552 - type: mrr_at_1 value: 63.3 - type: mrr_at_10 value: 73.238 - type: mrr_at_100 value: 73.64 - type: mrr_at_1000 value: 73.64699999999999 - type: mrr_at_3 value: 71.56700000000001 - type: mrr_at_5 value: 72.652 - type: ndcg_at_1 value: 63.1 - type: ndcg_at_10 value: 77.397 - type: ndcg_at_100 value: 79.11399999999999 - type: ndcg_at_1000 value: 79.305 - type: ndcg_at_3 value: 74.031 - type: ndcg_at_5 value: 75.976 - type: precision_at_1 value: 63.1 - type: precision_at_10 value: 9.049999999999999 - type: precision_at_100 value: 0.98 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 27.133000000000003 - type: precision_at_5 value: 17.22 - type: recall_at_1 value: 63.1 - type: recall_at_10 value: 90.5 - type: recall_at_100 value: 98.0 - type: recall_at_1000 value: 99.5 - type: recall_at_3 value: 81.39999999999999 - type: recall_at_5 value: 86.1 - type: main_score value: 77.397 task: type: Retrieval - dataset: config: default name: MTEB Waimai revision: 339287def212450dcaa9df8c22bf93e9980c7023 split: test type: C-MTEB/waimai-classification metrics: - type: accuracy value: 89.26 - type: accuracy_stderr value: 1.44651304867948 - type: ap value: 75.17154345788362 - type: ap_stderr value: 
2.7356371110082565 - type: f1 value: 87.94016849813178 - type: f1_stderr value: 1.3897605039980534 - type: main_score value: 89.26 task: type: Classification - dataset: config: default name: MTEB AlloProfClusteringP2P revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b split: test type: lyon-nlp/alloprof metrics: - type: main_score value: 71.20310003742769 - type: v_measure value: 71.20310003742769 - type: v_measure_std value: 2.3682783706448687 task: type: Clustering - dataset: config: default name: MTEB AlloProfClusteringS2S revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b split: test type: lyon-nlp/alloprof metrics: - type: main_score value: 59.64232194434788 - type: v_measure value: 59.64232194434788 - type: v_measure_std value: 2.4292956011867557 task: type: Clustering - dataset: config: default name: MTEB AlloprofReranking revision: 65393d0d7a08a10b4e348135e824f385d420b0fd split: test type: lyon-nlp/mteb-fr-reranking-alloprof-s2p metrics: - type: main_score value: 78.62041803111894 - type: map value: 78.62041803111894 - type: mrr value: 79.82309057762426 - type: nAUC_map_diff1 value: 58.23586953459263 - type: nAUC_map_max value: 16.162821346484357 - type: nAUC_map_std value: 20.727030444422525 - type: nAUC_mrr_diff1 value: 57.89675675999501 - type: nAUC_mrr_max value: 17.188359535738417 - type: nAUC_mrr_std value: 20.121404571879598 task: type: Reranking - dataset: config: default name: MTEB AlloprofRetrieval revision: fcf295ea64c750f41fadbaa37b9b861558e1bfbd split: test type: lyon-nlp/alloprof metrics: - type: main_score value: 58.499 - type: map_at_1 value: 40.371 - type: map_at_10 value: 52.337 - type: map_at_100 value: 53.04 - type: map_at_1000 value: 53.065 - type: map_at_20 value: 52.772 - type: map_at_3 value: 49.201 - type: map_at_5 value: 51.025 - type: mrr_at_1 value: 40.3713298791019 - type: mrr_at_10 value: 52.322165337061755 - type: mrr_at_100 value: 53.02092832847133 - type: mrr_at_1000 value: 53.04594680215603 - type: mrr_at_20 value: 52.750849914358135 - type: mrr_at_3 value: 49.150834772596475 - type: mrr_at_5 value: 50.998848589522275 - type: nauc_map_at_1000_diff1 value: 44.71946249374932 - type: nauc_map_at_1000_max value: 28.074204125714193 - type: nauc_map_at_1000_std value: -5.1319087890196275 - type: nauc_map_at_100_diff1 value: 44.71140286780233 - type: nauc_map_at_100_max value: 28.09677884622645 - type: nauc_map_at_100_std value: -5.116353867480612 - type: nauc_map_at_10_diff1 value: 44.737968596047736 - type: nauc_map_at_10_max value: 28.103186472557184 - type: nauc_map_at_10_std value: -5.258817287329683 - type: nauc_map_at_1_diff1 value: 47.48389890056789 - type: nauc_map_at_1_max value: 24.803734709402654 - type: nauc_map_at_1_std value: -6.504759899363267 - type: nauc_map_at_20_diff1 value: 44.67268454863271 - type: nauc_map_at_20_max value: 28.068912295976933 - type: nauc_map_at_20_std value: -5.1971060419801836 - type: nauc_map_at_3_diff1 value: 44.59399231542881 - type: nauc_map_at_3_max value: 27.097806786915502 - type: nauc_map_at_3_std value: -5.957120508111229 - type: nauc_map_at_5_diff1 value: 44.549807218619236 - type: nauc_map_at_5_max value: 28.03902312965202 - type: nauc_map_at_5_std value: -5.279585300980128 - type: nauc_mrr_at_1000_diff1 value: 44.70183532803094 - type: nauc_mrr_at_1000_max value: 28.08833759937601 - type: nauc_mrr_at_1000_std value: -5.097929115475795 - type: nauc_mrr_at_100_diff1 value: 44.693824401340684 - type: nauc_mrr_at_100_max value: 28.110898009292296 - type: nauc_mrr_at_100_std value: -5.082401300601749 - type: 
nauc_mrr_at_10_diff1 value: 44.74052791862188 - type: nauc_mrr_at_10_max value: 28.125378341430725 - type: nauc_mrr_at_10_std value: -5.209767905428716 - type: nauc_mrr_at_1_diff1 value: 47.48389890056789 - type: nauc_mrr_at_1_max value: 24.803734709402654 - type: nauc_mrr_at_1_std value: -6.504759899363267 - type: nauc_mrr_at_20_diff1 value: 44.65204014980107 - type: nauc_mrr_at_20_max value: 28.071523791101487 - type: nauc_mrr_at_20_std value: -5.176680495032765 - type: nauc_mrr_at_3_diff1 value: 44.566371489967835 - type: nauc_mrr_at_3_max value: 27.138418179089243 - type: nauc_mrr_at_3_std value: -5.8860676927947715 - type: nauc_mrr_at_5_diff1 value: 44.513022796226025 - type: nauc_mrr_at_5_max value: 28.037968016529184 - type: nauc_mrr_at_5_std value: -5.286851060853457 - type: nauc_ndcg_at_1000_diff1 value: 44.31019947897497 - type: nauc_ndcg_at_1000_max value: 29.332844099450185 - type: nauc_ndcg_at_1000_std value: -4.185675731246788 - type: nauc_ndcg_at_100_diff1 value: 44.15415366286996 - type: nauc_ndcg_at_100_max value: 30.098413084162345 - type: nauc_ndcg_at_100_std value: -3.557438303045246 - type: nauc_ndcg_at_10_diff1 value: 44.117356815361376 - type: nauc_ndcg_at_10_max value: 30.090057186506147 - type: nauc_ndcg_at_10_std value: -4.294561567142078 - type: nauc_ndcg_at_1_diff1 value: 47.48389890056789 - type: nauc_ndcg_at_1_max value: 24.803734709402654 - type: nauc_ndcg_at_1_std value: -6.504759899363267 - type: nauc_ndcg_at_20_diff1 value: 43.868556983413285 - type: nauc_ndcg_at_20_max value: 30.06455269775592 - type: nauc_ndcg_at_20_std value: -3.9645560243946623 - type: nauc_ndcg_at_3_diff1 value: 43.71970793339256 - type: nauc_ndcg_at_3_max value: 28.057786581438034 - type: nauc_ndcg_at_3_std value: -5.597352364190012 - type: nauc_ndcg_at_5_diff1 value: 43.57692922989753 - type: nauc_ndcg_at_5_max value: 29.811975056854994 - type: nauc_ndcg_at_5_std value: -4.362865924703688 - type: nauc_precision_at_1000_diff1 value: 37.65255144893002 - type: nauc_precision_at_1000_max value: 88.70768683938714 - type: nauc_precision_at_1000_std value: 69.77642765639528 - type: nauc_precision_at_100_diff1 value: 38.99412121382678 - type: nauc_precision_at_100_max value: 61.57652450016459 - type: nauc_precision_at_100_std value: 24.826035139656348 - type: nauc_precision_at_10_diff1 value: 41.78189732924517 - type: nauc_precision_at_10_max value: 39.83536802453079 - type: nauc_precision_at_10_std value: 0.431964006091015 - type: nauc_precision_at_1_diff1 value: 47.48389890056789 - type: nauc_precision_at_1_max value: 24.803734709402654 - type: nauc_precision_at_1_std value: -6.504759899363267 - type: nauc_precision_at_20_diff1 value: 39.33781305274886 - type: nauc_precision_at_20_max value: 43.00448814568695 - type: nauc_precision_at_20_std value: 4.5633424143661365 - type: nauc_precision_at_3_diff1 value: 40.99977742505519 - type: nauc_precision_at_3_max value: 31.14585236181214 - type: nauc_precision_at_3_std value: -4.404002104899136 - type: nauc_precision_at_5_diff1 value: 40.12130730401297 - type: nauc_precision_at_5_max value: 36.45000981581976 - type: nauc_precision_at_5_std value: -0.8603896798394983 - type: nauc_recall_at_1000_diff1 value: 37.652551448927504 - type: nauc_recall_at_1000_max value: 88.70768683938547 - type: nauc_recall_at_1000_std value: 69.77642765638893 - type: nauc_recall_at_100_diff1 value: 38.9941212138267 - type: nauc_recall_at_100_max value: 61.57652450016457 - type: nauc_recall_at_100_std value: 24.82603513965631 - type: nauc_recall_at_10_diff1 value: 
41.781897329245105 - type: nauc_recall_at_10_max value: 39.83536802453082 - type: nauc_recall_at_10_std value: 0.4319640060909985 - type: nauc_recall_at_1_diff1 value: 47.48389890056789 - type: nauc_recall_at_1_max value: 24.803734709402654 - type: nauc_recall_at_1_std value: -6.504759899363267 - type: nauc_recall_at_20_diff1 value: 39.337813052748835 - type: nauc_recall_at_20_max value: 43.00448814568676 - type: nauc_recall_at_20_std value: 4.56334241436601 - type: nauc_recall_at_3_diff1 value: 40.99977742505522 - type: nauc_recall_at_3_max value: 31.14585236181218 - type: nauc_recall_at_3_std value: -4.404002104899084 - type: nauc_recall_at_5_diff1 value: 40.121307304013 - type: nauc_recall_at_5_max value: 36.450009815819726 - type: nauc_recall_at_5_std value: -0.8603896798395225 - type: ndcg_at_1 value: 40.371 - type: ndcg_at_10 value: 58.499 - type: ndcg_at_100 value: 61.958 - type: ndcg_at_1000 value: 62.638000000000005 - type: ndcg_at_20 value: 60.068 - type: ndcg_at_3 value: 52.079 - type: ndcg_at_5 value: 55.359 - type: precision_at_1 value: 40.371 - type: precision_at_10 value: 7.797999999999999 - type: precision_at_100 value: 0.943 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 4.208 - type: precision_at_3 value: 20.135 - type: precision_at_5 value: 13.669999999999998 - type: recall_at_1 value: 40.371 - type: recall_at_10 value: 77.979 - type: recall_at_100 value: 94.257 - type: recall_at_1000 value: 99.655 - type: recall_at_20 value: 84.154 - type: recall_at_3 value: 60.406000000000006 - type: recall_at_5 value: 68.351 task: type: Retrieval - dataset: config: fr name: MTEB AmazonReviewsClassification (fr) revision: 1399c76144fd37290681b995c656ef9b2e06e26d split: test type: mteb/amazon_reviews_multi metrics: - type: accuracy value: 55.186 - type: f1 value: 54.46705535013317 - type: f1_weighted value: 54.46705535013317 - type: main_score value: 55.186 task: type: Classification - dataset: config: default name: MTEB BSARDRetrieval revision: 5effa1b9b5fa3b0f9e12523e6e43e5f86a6e6d59 split: test type: maastrichtlawtech/bsard metrics: - type: main_score value: 65.766 - type: map_at_1 value: 17.116999999999997 - type: map_at_10 value: 24.2 - type: map_at_100 value: 25.196 - type: map_at_1000 value: 25.285999999999998 - type: map_at_20 value: 24.84 - type: map_at_3 value: 21.246000000000002 - type: map_at_5 value: 23.386000000000003 - type: mrr_at_1 value: 17.117117117117118 - type: mrr_at_10 value: 24.19955669955671 - type: mrr_at_100 value: 25.195531920335007 - type: mrr_at_1000 value: 25.284600511909495 - type: mrr_at_20 value: 24.840254977638896 - type: mrr_at_3 value: 21.246246246246244 - type: mrr_at_5 value: 23.38588588588589 - type: nauc_map_at_1000_diff1 value: 10.81116818873305 - type: nauc_map_at_1000_max value: 18.081485212587296 - type: nauc_map_at_1000_std value: 15.55247182359811 - type: nauc_map_at_100_diff1 value: 10.769025561727476 - type: nauc_map_at_100_max value: 18.05422658310923 - type: nauc_map_at_100_std value: 15.5467718904851 - type: nauc_map_at_10_diff1 value: 10.683272018434048 - type: nauc_map_at_10_max value: 18.142476171157714 - type: nauc_map_at_10_std value: 15.160871943210017 - type: nauc_map_at_1_diff1 value: 15.136874216646229 - type: nauc_map_at_1_max value: 19.68585969419655 - type: nauc_map_at_1_std value: 15.169957564848444 - type: nauc_map_at_20_diff1 value: 11.04316522915875 - type: nauc_map_at_20_max value: 17.817024791267443 - type: nauc_map_at_20_std value: 15.071246935999893 - type: nauc_map_at_3_diff1 value: 
8.893328353778843 - type: nauc_map_at_3_max value: 16.402408590507946 - type: nauc_map_at_3_std value: 14.631998787185735 - type: nauc_map_at_5_diff1 value: 9.802455874823172 - type: nauc_map_at_5_max value: 17.939476196078495 - type: nauc_map_at_5_std value: 14.130589132632698 - type: nauc_mrr_at_1000_diff1 value: 10.813072323683013 - type: nauc_mrr_at_1000_max value: 18.08332318614462 - type: nauc_mrr_at_1000_std value: 15.553043223942819 - type: nauc_mrr_at_100_diff1 value: 10.77091057430458 - type: nauc_mrr_at_100_max value: 18.055798185778123 - type: nauc_mrr_at_100_std value: 15.547068262312003 - type: nauc_mrr_at_10_diff1 value: 10.683272018434048 - type: nauc_mrr_at_10_max value: 18.142476171157714 - type: nauc_mrr_at_10_std value: 15.160871943210017 - type: nauc_mrr_at_1_diff1 value: 15.136874216646229 - type: nauc_mrr_at_1_max value: 19.68585969419655 - type: nauc_mrr_at_1_std value: 15.169957564848444 - type: nauc_mrr_at_20_diff1 value: 11.04316522915875 - type: nauc_mrr_at_20_max value: 17.817024791267443 - type: nauc_mrr_at_20_std value: 15.071246935999893 - type: nauc_mrr_at_3_diff1 value: 8.893328353778843 - type: nauc_mrr_at_3_max value: 16.402408590507946 - type: nauc_mrr_at_3_std value: 14.631998787185735 - type: nauc_mrr_at_5_diff1 value: 9.802455874823172 - type: nauc_mrr_at_5_max value: 17.939476196078495 - type: nauc_mrr_at_5_std value: 14.130589132632698 - type: nauc_ndcg_at_1000_diff1 value: 11.202853727201774 - type: nauc_ndcg_at_1000_max value: 19.0293189527563 - type: nauc_ndcg_at_1000_std value: 18.390388750658357 - type: nauc_ndcg_at_100_diff1 value: 10.087335018055228 - type: nauc_ndcg_at_100_max value: 18.78516003607274 - type: nauc_ndcg_at_100_std value: 18.780357674944415 - type: nauc_ndcg_at_10_diff1 value: 10.574953671198443 - type: nauc_ndcg_at_10_max value: 18.572291623672044 - type: nauc_ndcg_at_10_std value: 15.808055075116057 - type: nauc_ndcg_at_1_diff1 value: 15.136874216646229 - type: nauc_ndcg_at_1_max value: 19.68585969419655 - type: nauc_ndcg_at_1_std value: 15.169957564848444 - type: nauc_ndcg_at_20_diff1 value: 11.86104023461335 - type: nauc_ndcg_at_20_max value: 17.436985589044458 - type: nauc_ndcg_at_20_std value: 15.588720372098383 - type: nauc_ndcg_at_3_diff1 value: 7.212552449189805 - type: nauc_ndcg_at_3_max value: 15.573909877641508 - type: nauc_ndcg_at_3_std value: 14.53705493856145 - type: nauc_ndcg_at_5_diff1 value: 8.778923731622235 - type: nauc_ndcg_at_5_max value: 18.140995131168534 - type: nauc_ndcg_at_5_std value: 13.608313703781533 - type: nauc_precision_at_1000_diff1 value: 21.242679241621413 - type: nauc_precision_at_1000_max value: 28.358433127289924 - type: nauc_precision_at_1000_std value: 43.82822797432329 - type: nauc_precision_at_100_diff1 value: 6.627014646720404 - type: nauc_precision_at_100_max value: 22.40433487802035 - type: nauc_precision_at_100_std value: 34.933889742457595 - type: nauc_precision_at_10_diff1 value: 10.885683410075934 - type: nauc_precision_at_10_max value: 19.96889041019717 - type: nauc_precision_at_10_std value: 17.798863824564464 - type: nauc_precision_at_1_diff1 value: 15.136874216646229 - type: nauc_precision_at_1_max value: 19.68585969419655 - type: nauc_precision_at_1_std value: 15.169957564848444 - type: nauc_precision_at_20_diff1 value: 15.496066928172066 - type: nauc_precision_at_20_max value: 16.03026652303162 - type: nauc_precision_at_20_std value: 17.26605341902364 - type: nauc_precision_at_3_diff1 value: 2.968469300914268 - type: nauc_precision_at_3_max value: 13.49791571660617 - 
type: nauc_precision_at_3_std value: 14.311739399090806 - type: nauc_precision_at_5_diff1 value: 6.502154730668018 - type: nauc_precision_at_5_max value: 18.889080152631124 - type: nauc_precision_at_5_std value: 12.221319698087786 - type: nauc_recall_at_1000_diff1 value: 21.242679241621435 - type: nauc_recall_at_1000_max value: 28.358433127289974 - type: nauc_recall_at_1000_std value: 43.82822797432328 - type: nauc_recall_at_100_diff1 value: 6.62701464672039 - type: nauc_recall_at_100_max value: 22.404334878020286 - type: nauc_recall_at_100_std value: 34.93388974245755 - type: nauc_recall_at_10_diff1 value: 10.885683410075906 - type: nauc_recall_at_10_max value: 19.968890410197133 - type: nauc_recall_at_10_std value: 17.7988638245644 - type: nauc_recall_at_1_diff1 value: 15.136874216646229 - type: nauc_recall_at_1_max value: 19.68585969419655 - type: nauc_recall_at_1_std value: 15.169957564848444 - type: nauc_recall_at_20_diff1 value: 15.49606692817206 - type: nauc_recall_at_20_max value: 16.030266523031628 - type: nauc_recall_at_20_std value: 17.26605341902362 - type: nauc_recall_at_3_diff1 value: 2.968469300914263 - type: nauc_recall_at_3_max value: 13.497915716606142 - type: nauc_recall_at_3_std value: 14.31173939909079 - type: nauc_recall_at_5_diff1 value: 6.50215473066801 - type: nauc_recall_at_5_max value: 18.889080152631095 - type: nauc_recall_at_5_std value: 12.221319698087767 - type: ndcg_at_1 value: 17.116999999999997 - type: ndcg_at_10 value: 28.524 - type: ndcg_at_100 value: 33.476 - type: ndcg_at_1000 value: 36.012 - type: ndcg_at_20 value: 30.820999999999998 - type: ndcg_at_3 value: 22.721 - type: ndcg_at_5 value: 26.596999999999998 - type: precision_at_1 value: 17.116999999999997 - type: precision_at_10 value: 4.234 - type: precision_at_100 value: 0.658 - type: precision_at_1000 value: 0.086 - type: precision_at_20 value: 2.568 - type: precision_at_3 value: 9.009 - type: precision_at_5 value: 7.297 - type: recall_at_1 value: 17.116999999999997 - type: recall_at_10 value: 42.342 - type: recall_at_100 value: 65.766 - type: recall_at_1000 value: 86.036 - type: recall_at_20 value: 51.351 - type: recall_at_3 value: 27.027 - type: recall_at_5 value: 36.486000000000004 task: type: Retrieval - dataset: config: default name: MTEB HALClusteringS2S revision: e06ebbbb123f8144bef1a5d18796f3dec9ae2915 split: test type: lyon-nlp/clustering-hal-s2s metrics: - type: main_score value: 28.18744772954557 - type: v_measure value: 28.18744772954557 - type: v_measure_std value: 3.239838057506439 task: type: Clustering - dataset: config: fr name: MTEB MLSUMClusteringP2P (fr) revision: b5d54f8f3b61ae17845046286940f03c6bc79bc7 split: test type: reciTAL/mlsum metrics: - type: main_score value: 47.75009059283003 - type: v_measure value: 47.75009059283003 - type: v_measure_std value: 2.009277732690298 task: type: Clustering - dataset: config: fr name: MTEB MLSUMClusteringS2S (fr) revision: b5d54f8f3b61ae17845046286940f03c6bc79bc7 split: test type: reciTAL/mlsum metrics: - type: main_score value: 47.46091989113078 - type: v_measure value: 47.46091989113078 - type: v_measure_std value: 2.604802270948194 task: type: Clustering - dataset: config: fr name: MTEB MTOPDomainClassification (fr) revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf split: test type: mteb/mtop_domain metrics: - type: accuracy value: 97.20325712496086 - type: f1 value: 97.05991090368462 - type: f1_weighted value: 97.20748006323807 - type: main_score value: 97.20325712496086 task: type: Classification - dataset: config: fr name: MTEB 
MTOPIntentClassification (fr) revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba split: test type: mteb/mtop_intent metrics: - type: accuracy value: 93.07234575634199 - type: f1 value: 76.54521288506878 - type: f1_weighted value: 93.6903586431893 - type: main_score value: 93.07234575634199 task: type: Classification - dataset: config: fra name: MTEB MasakhaNEWSClassification (fra) revision: 18193f187b92da67168c655c9973a165ed9593dd split: test type: mteb/masakhanews metrics: - type: accuracy value: 82.48815165876778 - type: f1 value: 78.71164464238117 - type: f1_weighted value: 82.38927389376973 - type: main_score value: 82.48815165876778 task: type: Classification - dataset: config: fra name: MTEB MasakhaNEWSClusteringP2P (fra) revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 split: test type: masakhane/masakhanews metrics: - type: main_score value: 73.85712952800003 - type: v_measure value: 73.85712952800003 - type: v_measure_std value: 22.471668299794416 task: type: Clustering - dataset: config: fra name: MTEB MasakhaNEWSClusteringS2S (fra) revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 split: test type: masakhane/masakhanews metrics: - type: main_score value: 67.23960512566751 - type: v_measure value: 67.23960512566751 - type: v_measure_std value: 24.65079601360142 task: type: Clustering - dataset: config: fr name: MTEB MassiveIntentClassification (fr) revision: 4672e20407010da34463acc759c162ca9734bca6 split: test type: mteb/amazon_massive_intent metrics: - type: accuracy value: 79.59986550100874 - type: f1 value: 76.0439154517916 - type: f1_weighted value: 79.48538292013761 - type: main_score value: 79.59986550100874 task: type: Classification - dataset: config: fr name: MTEB MassiveScenarioClassification (fr) revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 split: test type: mteb/amazon_massive_scenario metrics: - type: accuracy value: 82.182246133154 - type: f1 value: 81.68006668655397 - type: f1_weighted value: 81.94775072858566 - type: main_score value: 82.182246133154 task: type: Classification - dataset: config: fr name: MTEB MintakaRetrieval (fr) revision: efa78cc2f74bbcd21eff2261f9e13aebe40b814e split: test type: jinaai/mintakaqa metrics: - type: main_score value: 62.532 - type: map_at_1 value: 45.823 - type: map_at_10 value: 57.174 - type: map_at_100 value: 57.735 - type: map_at_1000 value: 57.767 - type: map_at_20 value: 57.53 - type: map_at_3 value: 54.716 - type: map_at_5 value: 56.227000000000004 - type: mrr_at_1 value: 45.82309582309582 - type: mrr_at_10 value: 57.17958217958217 - type: mrr_at_100 value: 57.744059413627866 - type: mrr_at_1000 value: 57.776651992832605 - type: mrr_at_20 value: 57.53890924556554 - type: mrr_at_3 value: 54.716079716079676 - type: mrr_at_5 value: 56.227136227136256 - type: nauc_map_at_1000_diff1 value: 39.48401851944296 - type: nauc_map_at_1000_max value: 36.55276875160682 - type: nauc_map_at_1000_std value: 3.9173787361040913 - type: nauc_map_at_100_diff1 value: 39.45696514871956 - type: nauc_map_at_100_max value: 36.55786982498759 - type: nauc_map_at_100_std value: 3.9506714061766557 - type: nauc_map_at_10_diff1 value: 39.31548009319837 - type: nauc_map_at_10_max value: 36.75711871602276 - type: nauc_map_at_10_std value: 3.782911249250981 - type: nauc_map_at_1_diff1 value: 44.190649439568766 - type: nauc_map_at_1_max value: 31.017419446234317 - type: nauc_map_at_1_std value: 0.5544388561183956 - type: nauc_map_at_20_diff1 value: 39.443640617310585 - type: nauc_map_at_20_max value: 36.63799366674228 - type: nauc_map_at_20_std value: 
3.934276303386171 - type: nauc_map_at_3_diff1 value: 40.30871768246873 - type: nauc_map_at_3_max value: 36.944169455458656 - type: nauc_map_at_3_std value: 2.9847330185694556 - type: nauc_map_at_5_diff1 value: 39.590461060438095 - type: nauc_map_at_5_max value: 36.998781454405574 - type: nauc_map_at_5_std value: 3.532693606637119 - type: nauc_mrr_at_1000_diff1 value: 39.46102363098429 - type: nauc_mrr_at_1000_max value: 36.56900606103558 - type: nauc_mrr_at_1000_std value: 3.972436075561705 - type: nauc_mrr_at_100_diff1 value: 39.43269261665982 - type: nauc_mrr_at_100_max value: 36.574081599242014 - type: nauc_mrr_at_100_std value: 4.006374171904806 - type: nauc_mrr_at_10_diff1 value: 39.29970560564493 - type: nauc_mrr_at_10_max value: 36.778388879484716 - type: nauc_mrr_at_10_std value: 3.8335456201567206 - type: nauc_mrr_at_1_diff1 value: 44.190649439568766 - type: nauc_mrr_at_1_max value: 31.017419446234317 - type: nauc_mrr_at_1_std value: 0.5544388561183956 - type: nauc_mrr_at_20_diff1 value: 39.42091158484574 - type: nauc_mrr_at_20_max value: 36.65421566061936 - type: nauc_mrr_at_20_std value: 3.988695948848555 - type: nauc_mrr_at_3_diff1 value: 40.313976315898195 - type: nauc_mrr_at_3_max value: 36.960483501441985 - type: nauc_mrr_at_3_std value: 3.0112756156560394 - type: nauc_mrr_at_5_diff1 value: 39.56386294620379 - type: nauc_mrr_at_5_max value: 37.02119815939672 - type: nauc_mrr_at_5_std value: 3.6118004205573184 - type: nauc_ndcg_at_1000_diff1 value: 38.05281585863137 - type: nauc_ndcg_at_1000_max value: 37.41178875860201 - type: nauc_ndcg_at_1000_std value: 5.525420555163393 - type: nauc_ndcg_at_100_diff1 value: 37.18408005856676 - type: nauc_ndcg_at_100_max value: 37.617851212997685 - type: nauc_ndcg_at_100_std value: 6.871461890669446 - type: nauc_ndcg_at_10_diff1 value: 36.624444841382484 - type: nauc_ndcg_at_10_max value: 38.62100324849529 - type: nauc_ndcg_at_10_std value: 6.027810657475449 - type: nauc_ndcg_at_1_diff1 value: 44.190649439568766 - type: nauc_ndcg_at_1_max value: 31.017419446234317 - type: nauc_ndcg_at_1_std value: 0.5544388561183956 - type: nauc_ndcg_at_20_diff1 value: 37.057047514121564 - type: nauc_ndcg_at_20_max value: 38.19839331454421 - type: nauc_ndcg_at_20_std value: 6.770369938343684 - type: nauc_ndcg_at_3_diff1 value: 38.95821428563954 - type: nauc_ndcg_at_3_max value: 38.87440219376017 - type: nauc_ndcg_at_3_std value: 4.097498274708613 - type: nauc_ndcg_at_5_diff1 value: 37.515589837182034 - type: nauc_ndcg_at_5_max value: 39.165561493023276 - type: nauc_ndcg_at_5_std value: 5.291512124344874 - type: nauc_precision_at_1000_diff1 value: -13.365474882749279 - type: nauc_precision_at_1000_max value: 50.68568417959442 - type: nauc_precision_at_1000_std value: 37.847145129019054 - type: nauc_precision_at_100_diff1 value: 12.081443207482383 - type: nauc_precision_at_100_max value: 43.67561356191485 - type: nauc_precision_at_100_std value: 44.64523987759538 - type: nauc_precision_at_10_diff1 value: 23.20358204183261 - type: nauc_precision_at_10_max value: 46.93706139285088 - type: nauc_precision_at_10_std value: 17.36243956517301 - type: nauc_precision_at_1_diff1 value: 44.190649439568766 - type: nauc_precision_at_1_max value: 31.017419446234317 - type: nauc_precision_at_1_std value: 0.5544388561183956 - type: nauc_precision_at_20_diff1 value: 22.42836999246196 - type: nauc_precision_at_20_max value: 46.29381413041759 - type: nauc_precision_at_20_std value: 26.126609401922696 - type: nauc_precision_at_3_diff1 value: 34.503018704702484 - type: 
nauc_precision_at_3_max value: 45.194775358016095 - type: nauc_precision_at_3_std value: 7.864444241838433 - type: nauc_precision_at_5_diff1 value: 29.494641243672138 - type: nauc_precision_at_5_max value: 47.326071718857484 - type: nauc_precision_at_5_std value: 12.273738036245172 - type: nauc_recall_at_1000_diff1 value: -13.365474882756335 - type: nauc_recall_at_1000_max value: 50.68568417959348 - type: nauc_recall_at_1000_std value: 37.8471451290128 - type: nauc_recall_at_100_diff1 value: 12.08144320748251 - type: nauc_recall_at_100_max value: 43.675613561914986 - type: nauc_recall_at_100_std value: 44.645239877595564 - type: nauc_recall_at_10_diff1 value: 23.203582041832526 - type: nauc_recall_at_10_max value: 46.9370613928509 - type: nauc_recall_at_10_std value: 17.36243956517297 - type: nauc_recall_at_1_diff1 value: 44.190649439568766 - type: nauc_recall_at_1_max value: 31.017419446234317 - type: nauc_recall_at_1_std value: 0.5544388561183956 - type: nauc_recall_at_20_diff1 value: 22.42836999246212 - type: nauc_recall_at_20_max value: 46.29381413041773 - type: nauc_recall_at_20_std value: 26.12660940192268 - type: nauc_recall_at_3_diff1 value: 34.50301870470248 - type: nauc_recall_at_3_max value: 45.19477535801611 - type: nauc_recall_at_3_std value: 7.8644442418384335 - type: nauc_recall_at_5_diff1 value: 29.494641243672216 - type: nauc_recall_at_5_max value: 47.32607171885759 - type: nauc_recall_at_5_std value: 12.273738036245142 - type: ndcg_at_1 value: 45.823 - type: ndcg_at_10 value: 62.532 - type: ndcg_at_100 value: 65.298 - type: ndcg_at_1000 value: 66.214 - type: ndcg_at_20 value: 63.82600000000001 - type: ndcg_at_3 value: 57.528999999999996 - type: ndcg_at_5 value: 60.24 - type: precision_at_1 value: 45.823 - type: precision_at_10 value: 7.928 - type: precision_at_100 value: 0.923 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 4.22 - type: precision_at_3 value: 21.881 - type: precision_at_5 value: 14.438999999999998 - type: recall_at_1 value: 45.823 - type: recall_at_10 value: 79.279 - type: recall_at_100 value: 92.301 - type: recall_at_1000 value: 99.631 - type: recall_at_20 value: 84.398 - type: recall_at_3 value: 65.643 - type: recall_at_5 value: 72.195 task: type: Retrieval - dataset: config: fr name: MTEB OpusparcusPC (fr) revision: 9e9b1f8ef51616073f47f306f7f47dd91663f86a split: test type: GEM/opusparcus metrics: - type: cosine_accuracy value: 99.90069513406156 - type: cosine_accuracy_threshold value: 54.45001207375879 - type: cosine_ap value: 100.0 - type: cosine_f1 value: 99.95032290114257 - type: cosine_f1_threshold value: 54.45001207375879 - type: cosine_precision value: 100.0 - type: cosine_recall value: 99.90069513406156 - type: dot_accuracy value: 99.90069513406156 - type: dot_accuracy_threshold value: 1312800.0 - type: dot_ap value: 100.0 - type: dot_f1 value: 99.95032290114257 - type: dot_f1_threshold value: 1312800.0 - type: dot_precision value: 100.0 - type: dot_recall value: 99.90069513406156 - type: euclidean_accuracy value: 99.90069513406156 - type: euclidean_accuracy_threshold value: 15150.791732002876 - type: euclidean_ap value: 100.0 - type: euclidean_f1 value: 99.95032290114257 - type: euclidean_f1_threshold value: 15150.791732002876 - type: euclidean_precision value: 100.0 - type: euclidean_recall value: 99.90069513406156 - type: main_score value: 100.0 - type: manhattan_accuracy value: 99.90069513406156 - type: manhattan_accuracy_threshold value: 717903.2791554928 - type: manhattan_ap value: 100.0 - type: manhattan_f1 value: 
99.95032290114257 - type: manhattan_f1_threshold value: 717903.2791554928 - type: manhattan_precision value: 100.0 - type: manhattan_recall value: 99.90069513406156 - type: max_ap value: 100.0 - type: max_f1 value: 99.95032290114257 - type: max_precision value: 100.0 - type: max_recall value: 99.90069513406156 - type: similarity_accuracy value: 99.90069513406156 - type: similarity_accuracy_threshold value: 54.45001207375879 - type: similarity_ap value: 100.0 - type: similarity_f1 value: 99.95032290114257 - type: similarity_f1_threshold value: 54.45001207375879 - type: similarity_precision value: 100.0 - type: similarity_recall value: 99.90069513406156 task: type: PairClassification - dataset: config: fr name: MTEB PawsXPairClassification (fr) revision: 8a04d940a42cd40658986fdd8e3da561533a3646 split: test type: google-research-datasets/paws-x metrics: - type: cosine_accuracy value: 67.95 - type: cosine_accuracy_threshold value: 97.36901285947026 - type: cosine_ap value: 70.14158727060726 - type: cosine_f1 value: 65.38108356290174 - type: cosine_f1_threshold value: 94.90683744884689 - type: cosine_precision value: 55.84313725490196 - type: cosine_recall value: 78.8482834994463 - type: dot_accuracy value: 60.5 - type: dot_accuracy_threshold value: 2606400.0 - type: dot_ap value: 57.0114505567262 - type: dot_f1 value: 63.29394387001477 - type: dot_f1_threshold value: 2345600.0 - type: dot_precision value: 47.4792243767313 - type: dot_recall value: 94.90586932447398 - type: euclidean_accuracy value: 68.05 - type: euclidean_accuracy_threshold value: 3824.99743197985 - type: euclidean_ap value: 70.01158306654237 - type: euclidean_f1 value: 65.21939953810623 - type: euclidean_f1_threshold value: 5187.47968966464 - type: euclidean_precision value: 55.942947702060216 - type: euclidean_recall value: 78.18383167220377 - type: main_score value: 70.14158727060726 - type: manhattan_accuracy value: 68.05 - type: manhattan_accuracy_threshold value: 191852.34832763672 - type: manhattan_ap value: 70.01670033904287 - type: manhattan_f1 value: 65.2854511970534 - type: manhattan_f1_threshold value: 246807.1710705757 - type: manhattan_precision value: 55.87076438140268 - type: manhattan_recall value: 78.51605758582502 - type: max_ap value: 70.14158727060726 - type: max_f1 value: 65.38108356290174 - type: max_precision value: 55.942947702060216 - type: max_recall value: 94.90586932447398 - type: similarity_accuracy value: 67.95 - type: similarity_accuracy_threshold value: 97.36901285947026 - type: similarity_ap value: 70.14158727060726 - type: similarity_f1 value: 65.38108356290174 - type: similarity_f1_threshold value: 94.90683744884689 - type: similarity_precision value: 55.84313725490196 - type: similarity_recall value: 78.8482834994463 task: type: PairClassification - dataset: config: default name: MTEB SICKFr revision: e077ab4cf4774a1e36d86d593b150422fafd8e8a split: test type: Lajavaness/SICK-fr metrics: - type: cosine_pearson value: 79.79861486027 - type: cosine_spearman value: 79.3918786992987 - type: euclidean_pearson value: 77.73226212475764 - type: euclidean_spearman value: 79.08856888397014 - type: main_score value: 79.3918786992987 - type: manhattan_pearson value: 77.8002206650809 - type: manhattan_spearman value: 79.15284532531264 - type: pearson value: 79.79861486027 - type: spearman value: 79.3918786992987 task: type: STS - dataset: config: fr name: MTEB STS22 (fr) revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 split: test type: mteb/sts22-crosslingual-sts metrics: - type: cosine_pearson value: 
83.32314025534286 - type: cosine_spearman value: 83.2806004701507 - type: euclidean_pearson value: 81.88040500817269 - type: euclidean_spearman value: 82.73179823676206 - type: main_score value: 83.2806004701507 - type: manhattan_pearson value: 82.0438174605579 - type: manhattan_spearman value: 83.0253049811576 - type: pearson value: 83.32314025534286 - type: spearman value: 83.2806004701507 task: type: STS - dataset: config: fr name: MTEB STSBenchmarkMultilingualSTS (fr) revision: 29afa2569dcedaaa2fe6a3dcfebab33d28b82e8c split: test type: mteb/stsb_multi_mt metrics: - type: cosine_pearson value: 84.56723075054445 - type: cosine_spearman value: 85.08759191551403 - type: euclidean_pearson value: 83.186096744725 - type: euclidean_spearman value: 84.36958569816491 - type: main_score value: 85.08759191551403 - type: manhattan_pearson value: 83.1405072165467 - type: manhattan_spearman value: 84.34227830781155 - type: pearson value: 84.56723075054445 - type: spearman value: 85.08759191551403 task: type: STS - dataset: config: default name: MTEB SummEvalFr revision: b385812de6a9577b6f4d0f88c6a6e35395a94054 split: test type: lyon-nlp/summarization-summeval-fr-p2p metrics: - type: cosine_pearson value: 31.921764332449115 - type: cosine_spearman value: 31.260442997631806 - type: dot_pearson value: 31.585578707631406 - type: dot_spearman value: 31.479238746310028 - type: main_score value: 31.260442997631806 - type: pearson value: 31.921764332449115 - type: spearman value: 31.260442997631806 task: type: Summarization - dataset: config: default name: MTEB SyntecReranking revision: daf0863838cd9e3ba50544cdce3ac2b338a1b0ad split: test type: lyon-nlp/mteb-fr-reranking-syntec-s2p metrics: - type: main_score value: 91.83333333333333 - type: map value: 91.83333333333333 - type: mrr value: 92.0 - type: nAUC_map_diff1 value: 53.97793263646914 - type: nAUC_map_max value: 44.264158743282195 - type: nAUC_map_std value: 14.692218350754885 - type: nAUC_mrr_diff1 value: 54.36926882239366 - type: nAUC_mrr_max value: 46.43108510296003 - type: nAUC_mrr_std value: 17.48914092664096 task: type: Reranking - dataset: config: default name: MTEB SyntecRetrieval revision: 19661ccdca4dfc2d15122d776b61685f48c68ca9 split: test type: lyon-nlp/mteb-fr-retrieval-syntec-s2p metrics: - type: main_score value: 90.36699999999999 - type: map_at_1 value: 79.0 - type: map_at_10 value: 87.18599999999999 - type: map_at_100 value: 87.18599999999999 - type: map_at_1000 value: 87.18599999999999 - type: map_at_20 value: 87.18599999999999 - type: map_at_3 value: 86.0 - type: map_at_5 value: 86.95 - type: mrr_at_1 value: 79.0 - type: mrr_at_10 value: 87.18611111111112 - type: mrr_at_100 value: 87.18611111111112 - type: mrr_at_1000 value: 87.18611111111112 - type: mrr_at_20 value: 87.18611111111112 - type: mrr_at_3 value: 86.0 - type: mrr_at_5 value: 86.95 - type: nauc_map_at_1000_diff1 value: 63.05539428169271 - type: nauc_map_at_1000_max value: 45.428107132447124 - type: nauc_map_at_1000_std value: 13.94507583970834 - type: nauc_map_at_100_diff1 value: 63.05539428169271 - type: nauc_map_at_100_max value: 45.428107132447124 - type: nauc_map_at_100_std value: 13.94507583970834 - type: nauc_map_at_10_diff1 value: 63.05539428169271 - type: nauc_map_at_10_max value: 45.428107132447124 - type: nauc_map_at_10_std value: 13.94507583970834 - type: nauc_map_at_1_diff1 value: 64.24122923028831 - type: nauc_map_at_1_max value: 44.34077957053877 - type: nauc_map_at_1_std value: 9.594344386466878 - type: nauc_map_at_20_diff1 value: 63.05539428169271 - type: 
nauc_map_at_20_max value: 45.428107132447124 - type: nauc_map_at_20_std value: 13.94507583970834 - type: nauc_map_at_3_diff1 value: 62.30831315577075 - type: nauc_map_at_3_max value: 47.33980193586779 - type: nauc_map_at_3_std value: 16.132624025733 - type: nauc_map_at_5_diff1 value: 63.079622378971834 - type: nauc_map_at_5_max value: 45.13424437707254 - type: nauc_map_at_5_std value: 13.730785051570013 - type: nauc_mrr_at_1000_diff1 value: 63.05539428169271 - type: nauc_mrr_at_1000_max value: 45.428107132447124 - type: nauc_mrr_at_1000_std value: 13.94507583970834 - type: nauc_mrr_at_100_diff1 value: 63.05539428169271 - type: nauc_mrr_at_100_max value: 45.428107132447124 - type: nauc_mrr_at_100_std value: 13.94507583970834 - type: nauc_mrr_at_10_diff1 value: 63.05539428169271 - type: nauc_mrr_at_10_max value: 45.428107132447124 - type: nauc_mrr_at_10_std value: 13.94507583970834 - type: nauc_mrr_at_1_diff1 value: 64.24122923028831 - type: nauc_mrr_at_1_max value: 44.34077957053877 - type: nauc_mrr_at_1_std value: 9.594344386466878 - type: nauc_mrr_at_20_diff1 value: 63.05539428169271 - type: nauc_mrr_at_20_max value: 45.428107132447124 - type: nauc_mrr_at_20_std value: 13.94507583970834 - type: nauc_mrr_at_3_diff1 value: 62.30831315577075 - type: nauc_mrr_at_3_max value: 47.33980193586779 - type: nauc_mrr_at_3_std value: 16.132624025733 - type: nauc_mrr_at_5_diff1 value: 63.079622378971834 - type: nauc_mrr_at_5_max value: 45.13424437707254 - type: nauc_mrr_at_5_std value: 13.730785051570013 - type: nauc_ndcg_at_1000_diff1 value: 62.97376441474187 - type: nauc_ndcg_at_1000_max value: 45.457846840130586 - type: nauc_ndcg_at_1000_std value: 14.17695491254452 - type: nauc_ndcg_at_100_diff1 value: 62.97376441474187 - type: nauc_ndcg_at_100_max value: 45.457846840130586 - type: nauc_ndcg_at_100_std value: 14.17695491254452 - type: nauc_ndcg_at_10_diff1 value: 62.97376441474187 - type: nauc_ndcg_at_10_max value: 45.457846840130586 - type: nauc_ndcg_at_10_std value: 14.17695491254452 - type: nauc_ndcg_at_1_diff1 value: 64.24122923028831 - type: nauc_ndcg_at_1_max value: 44.34077957053877 - type: nauc_ndcg_at_1_std value: 9.594344386466878 - type: nauc_ndcg_at_20_diff1 value: 62.97376441474187 - type: nauc_ndcg_at_20_max value: 45.457846840130586 - type: nauc_ndcg_at_20_std value: 14.17695491254452 - type: nauc_ndcg_at_3_diff1 value: 61.47043349797183 - type: nauc_ndcg_at_3_max value: 49.12165820225059 - type: nauc_ndcg_at_3_std value: 18.525396343409568 - type: nauc_ndcg_at_5_diff1 value: 63.04022063936115 - type: nauc_ndcg_at_5_max value: 44.381937619091765 - type: nauc_ndcg_at_5_std value: 13.3263412698325 - type: nauc_precision_at_1000_diff1 value: .nan - type: nauc_precision_at_1000_max value: .nan - type: nauc_precision_at_1000_std value: .nan - type: nauc_precision_at_100_diff1 value: .nan - type: nauc_precision_at_100_max value: .nan - type: nauc_precision_at_100_std value: .nan - type: nauc_precision_at_10_diff1 value: 100.0 - type: nauc_precision_at_10_max value: 100.0 - type: nauc_precision_at_10_std value: 100.0 - type: nauc_precision_at_1_diff1 value: 64.24122923028831 - type: nauc_precision_at_1_max value: 44.34077957053877 - type: nauc_precision_at_1_std value: 9.594344386466878 - type: nauc_precision_at_20_diff1 value: 100.0 - type: nauc_precision_at_20_max value: 100.0 - type: nauc_precision_at_20_std value: 100.0 - type: nauc_precision_at_3_diff1 value: 56.27917833800158 - type: nauc_precision_at_3_max value: 60.51976346093969 - type: nauc_precision_at_3_std value: 
33.02209772798002 - type: nauc_precision_at_5_diff1 value: 63.81886087768404 - type: nauc_precision_at_5_max value: 27.544351073763345 - type: nauc_precision_at_5_std value: -0.4668534080301362 - type: nauc_recall_at_1000_diff1 value: .nan - type: nauc_recall_at_1000_max value: .nan - type: nauc_recall_at_1000_std value: .nan - type: nauc_recall_at_100_diff1 value: .nan - type: nauc_recall_at_100_max value: .nan - type: nauc_recall_at_100_std value: .nan - type: nauc_recall_at_10_diff1 value: .nan - type: nauc_recall_at_10_max value: .nan - type: nauc_recall_at_10_std value: .nan - type: nauc_recall_at_1_diff1 value: 64.24122923028831 - type: nauc_recall_at_1_max value: 44.34077957053877 - type: nauc_recall_at_1_std value: 9.594344386466878 - type: nauc_recall_at_20_diff1 value: .nan - type: nauc_recall_at_20_max value: .nan - type: nauc_recall_at_20_std value: .nan - type: nauc_recall_at_3_diff1 value: 56.27917833800187 - type: nauc_recall_at_3_max value: 60.51976346094 - type: nauc_recall_at_3_std value: 33.022097727980125 - type: nauc_recall_at_5_diff1 value: 63.81886087768457 - type: nauc_recall_at_5_max value: 27.544351073763107 - type: nauc_recall_at_5_std value: -0.46685340803013775 - type: ndcg_at_1 value: 79.0 - type: ndcg_at_10 value: 90.36699999999999 - type: ndcg_at_100 value: 90.36699999999999 - type: ndcg_at_1000 value: 90.36699999999999 - type: ndcg_at_20 value: 90.36699999999999 - type: ndcg_at_3 value: 88.071 - type: ndcg_at_5 value: 89.75 - type: precision_at_1 value: 79.0 - type: precision_at_10 value: 10.0 - type: precision_at_100 value: 1.0 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 5.0 - type: precision_at_3 value: 31.333 - type: precision_at_5 value: 19.6 - type: recall_at_1 value: 79.0 - type: recall_at_10 value: 100.0 - type: recall_at_100 value: 100.0 - type: recall_at_1000 value: 100.0 - type: recall_at_20 value: 100.0 - type: recall_at_3 value: 94.0 - type: recall_at_5 value: 98.0 task: type: Retrieval - dataset: config: fra-fra name: MTEB XPQARetrieval (fr) revision: c99d599f0a6ab9b85b065da6f9d94f9cf731679f split: test type: jinaai/xpqa metrics: - type: main_score value: 77.425 - type: map_at_1 value: 46.749 - type: map_at_10 value: 72.108 - type: map_at_100 value: 73.32499999999999 - type: map_at_1000 value: 73.341 - type: map_at_20 value: 72.991 - type: map_at_3 value: 65.09 - type: map_at_5 value: 70.137 - type: mrr_at_1 value: 71.82910547396529 - type: mrr_at_10 value: 78.63357492529722 - type: mrr_at_100 value: 78.97374961354801 - type: mrr_at_1000 value: 78.97840549855806 - type: mrr_at_20 value: 78.86005025292395 - type: mrr_at_3 value: 77.28081886960389 - type: mrr_at_5 value: 78.0551846906987 - type: nauc_map_at_1000_diff1 value: 57.508397030020156 - type: nauc_map_at_1000_max value: 43.80251983780665 - type: nauc_map_at_1000_std value: -16.231491160419434 - type: nauc_map_at_100_diff1 value: 57.48614844875469 - type: nauc_map_at_100_max value: 43.797011627763055 - type: nauc_map_at_100_std value: -16.239303348969592 - type: nauc_map_at_10_diff1 value: 57.254064849553934 - type: nauc_map_at_10_max value: 42.765535577219026 - type: nauc_map_at_10_std value: -17.255606315997156 - type: nauc_map_at_1_diff1 value: 65.04324659040175 - type: nauc_map_at_1_max value: 17.852220653388855 - type: nauc_map_at_1_std value: -14.257753661018779 - type: nauc_map_at_20_diff1 value: 57.48367588324867 - type: nauc_map_at_20_max value: 43.680084254814425 - type: nauc_map_at_20_std value: -16.59381108810359 - type: nauc_map_at_3_diff1 value: 
58.328817274958276 - type: nauc_map_at_3_max value: 34.603370607250675 - type: nauc_map_at_3_std value: -15.326569334165047 - type: nauc_map_at_5_diff1 value: 57.544271139796365 - type: nauc_map_at_5_max value: 41.58159814532708 - type: nauc_map_at_5_std value: -17.035562345654515 - type: nauc_mrr_at_1000_diff1 value: 67.23053035385993 - type: nauc_mrr_at_1000_max value: 53.982556981667095 - type: nauc_mrr_at_1000_std value: -12.015571062417035 - type: nauc_mrr_at_100_diff1 value: 67.23047293440347 - type: nauc_mrr_at_100_max value: 53.97931489747768 - type: nauc_mrr_at_100_std value: -12.026957248146365 - type: nauc_mrr_at_10_diff1 value: 67.25927907237941 - type: nauc_mrr_at_10_max value: 53.99647347811833 - type: nauc_mrr_at_10_std value: -12.356365137919108 - type: nauc_mrr_at_1_diff1 value: 67.80552098159194 - type: nauc_mrr_at_1_max value: 52.34740974885752 - type: nauc_mrr_at_1_std value: -9.009347371853096 - type: nauc_mrr_at_20_diff1 value: 67.22472566769486 - type: nauc_mrr_at_20_max value: 54.03480374123263 - type: nauc_mrr_at_20_std value: -12.129416933895373 - type: nauc_mrr_at_3_diff1 value: 66.86636026044627 - type: nauc_mrr_at_3_max value: 53.84675762408544 - type: nauc_mrr_at_3_std value: -12.318414220208327 - type: nauc_mrr_at_5_diff1 value: 67.16713697443882 - type: nauc_mrr_at_5_max value: 54.174275682276765 - type: nauc_mrr_at_5_std value: -12.382704200660772 - type: nauc_ndcg_at_1000_diff1 value: 60.076768803793875 - type: nauc_ndcg_at_1000_max value: 48.06880976583911 - type: nauc_ndcg_at_1000_std value: -14.8002468401513 - type: nauc_ndcg_at_100_diff1 value: 59.84195440900073 - type: nauc_ndcg_at_100_max value: 48.031759882567265 - type: nauc_ndcg_at_100_std value: -14.93671795434138 - type: nauc_ndcg_at_10_diff1 value: 59.091362656630984 - type: nauc_ndcg_at_10_max value: 45.902216798175296 - type: nauc_ndcg_at_10_std value: -18.225812204918686 - type: nauc_ndcg_at_1_diff1 value: 67.80552098159194 - type: nauc_ndcg_at_1_max value: 52.34740974885752 - type: nauc_ndcg_at_1_std value: -9.009347371853096 - type: nauc_ndcg_at_20_diff1 value: 59.80472569029982 - type: nauc_ndcg_at_20_max value: 47.92221974783734 - type: nauc_ndcg_at_20_std value: -16.589965314279805 - type: nauc_ndcg_at_3_diff1 value: 56.9195769675713 - type: nauc_ndcg_at_3_max value: 44.992740041222575 - type: nauc_ndcg_at_3_std value: -16.329730380555382 - type: nauc_ndcg_at_5_diff1 value: 59.31912266230594 - type: nauc_ndcg_at_5_max value: 44.75423089733974 - type: nauc_ndcg_at_5_std value: -17.744216780645583 - type: nauc_precision_at_1000_diff1 value: -30.976050318575094 - type: nauc_precision_at_1000_max value: 16.55619583017722 - type: nauc_precision_at_1000_std value: 10.549164466552044 - type: nauc_precision_at_100_diff1 value: -30.217028356940872 - type: nauc_precision_at_100_max value: 17.709049202840184 - type: nauc_precision_at_100_std value: 10.04190905252673 - type: nauc_precision_at_10_diff1 value: -19.588612396735584 - type: nauc_precision_at_10_max value: 23.97095583735318 - type: nauc_precision_at_10_std value: 1.3308819095790259 - type: nauc_precision_at_1_diff1 value: 67.80552098159194 - type: nauc_precision_at_1_max value: 52.34740974885752 - type: nauc_precision_at_1_std value: -9.009347371853096 - type: nauc_precision_at_20_diff1 value: -24.56372903999468 - type: nauc_precision_at_20_max value: 21.970766470092478 - type: nauc_precision_at_20_std value: 5.690019568793079 - type: nauc_precision_at_3_diff1 value: -5.293993834675436 - type: nauc_precision_at_3_max value: 
33.48037221970611 - type: nauc_precision_at_3_std value: -0.9905029996040207 - type: nauc_precision_at_5_diff1 value: -12.477204961113433 - type: nauc_precision_at_5_max value: 28.41320824321574 - type: nauc_precision_at_5_std value: -0.25510168506666026 - type: nauc_recall_at_1000_diff1 value: 63.80720019823024 - type: nauc_recall_at_1000_max value: 100.0 - type: nauc_recall_at_1000_std value: 100.0 - type: nauc_recall_at_100_diff1 value: 45.99503772001805 - type: nauc_recall_at_100_max value: 53.62256247578381 - type: nauc_recall_at_100_std value: -2.1521605315502126 - type: nauc_recall_at_10_diff1 value: 51.49183566173087 - type: nauc_recall_at_10_max value: 39.94460610694432 - type: nauc_recall_at_10_std value: -27.417226994058534 - type: nauc_recall_at_1_diff1 value: 65.04324659040175 - type: nauc_recall_at_1_max value: 17.852220653388855 - type: nauc_recall_at_1_std value: -14.257753661018779 - type: nauc_recall_at_20_diff1 value: 53.65987970751146 - type: nauc_recall_at_20_max value: 48.20536243702891 - type: nauc_recall_at_20_std value: -24.77784527777353 - type: nauc_recall_at_3_diff1 value: 53.27794448209969 - type: nauc_recall_at_3_max value: 30.304767840963283 - type: nauc_recall_at_3_std value: -19.099603261339936 - type: nauc_recall_at_5_diff1 value: 53.77383683020561 - type: nauc_recall_at_5_max value: 39.58616026474047 - type: nauc_recall_at_5_std value: -23.255086482736036 - type: ndcg_at_1 value: 71.829 - type: ndcg_at_10 value: 77.425 - type: ndcg_at_100 value: 80.88 - type: ndcg_at_1000 value: 81.128 - type: ndcg_at_20 value: 79.403 - type: ndcg_at_3 value: 72.89 - type: ndcg_at_5 value: 74.521 - type: precision_at_1 value: 71.829 - type: precision_at_10 value: 17.596999999999998 - type: precision_at_100 value: 2.033 - type: precision_at_1000 value: 0.207 - type: precision_at_20 value: 9.513 - type: precision_at_3 value: 44.192 - type: precision_at_5 value: 31.776 - type: recall_at_1 value: 46.749 - type: recall_at_10 value: 85.49799999999999 - type: recall_at_100 value: 98.17099999999999 - type: recall_at_1000 value: 99.733 - type: recall_at_20 value: 91.70700000000001 - type: recall_at_3 value: 70.309 - type: recall_at_5 value: 78.507 task: type: Retrieval - dataset: config: default name: MTEB AllegroReviews revision: b89853e6de927b0e3bfa8ecc0e56fe4e02ceafc6 split: test type: PL-MTEB/allegro-reviews metrics: - type: accuracy value: 65.0 - type: f1 value: 58.85888258599016 - type: f1_weighted value: 65.99554726292321 - type: main_score value: 65.0 task: type: Classification - dataset: config: default name: MTEB ArguAna-PL revision: 63fc86750af76253e8c760fc9e534bbf24d260a2 split: test type: clarin-knext/arguana-pl metrics: - type: main_score value: 59.71300000000001 - type: map_at_1 value: 35.135 - type: map_at_10 value: 51.092000000000006 - type: map_at_100 value: 51.773 - type: map_at_1000 value: 51.776999999999994 - type: map_at_20 value: 51.665000000000006 - type: map_at_3 value: 46.574 - type: map_at_5 value: 49.032 - type: mrr_at_1 value: 36.201991465149355 - type: mrr_at_10 value: 51.546405427984475 - type: mrr_at_100 value: 52.202374673015285 - type: mrr_at_1000 value: 52.20610086068531 - type: mrr_at_20 value: 52.096805353180756 - type: mrr_at_3 value: 47.01280227596022 - type: mrr_at_5 value: 49.49146514935999 - type: nauc_map_at_1000_diff1 value: 19.758403663654388 - type: nauc_map_at_1000_max value: 1.9211716901459552 - type: nauc_map_at_1000_std value: -12.391775130617594 - type: nauc_map_at_100_diff1 value: 19.75801012476506 - type: nauc_map_at_100_max 
value: 1.927233271789035 - type: nauc_map_at_100_std value: -12.390686358565384 - type: nauc_map_at_10_diff1 value: 19.618023487744257 - type: nauc_map_at_10_max value: 1.948823709088292 - type: nauc_map_at_10_std value: -12.590649627823774 - type: nauc_map_at_1_diff1 value: 22.704520355653777 - type: nauc_map_at_1_max value: -0.7340073588952427 - type: nauc_map_at_1_std value: -11.685082615631233 - type: nauc_map_at_20_diff1 value: 19.710150386755245 - type: nauc_map_at_20_max value: 1.9579689185617946 - type: nauc_map_at_20_std value: -12.454848473878485 - type: nauc_map_at_3_diff1 value: 19.88571571635227 - type: nauc_map_at_3_max value: 2.2089391275055754 - type: nauc_map_at_3_std value: -12.152625563551476 - type: nauc_map_at_5_diff1 value: 19.345423817148774 - type: nauc_map_at_5_max value: 2.4471831202433783 - type: nauc_map_at_5_std value: -11.60532301686549 - type: nauc_mrr_at_1000_diff1 value: 16.90786453167799 - type: nauc_mrr_at_1000_max value: 0.65578323377857 - type: nauc_mrr_at_1000_std value: -12.395929715413015 - type: nauc_mrr_at_100_diff1 value: 16.90781127619206 - type: nauc_mrr_at_100_max value: 0.6619900297824423 - type: nauc_mrr_at_100_std value: -12.394826789608906 - type: nauc_mrr_at_10_diff1 value: 16.785894192163838 - type: nauc_mrr_at_10_max value: 0.7096666849274212 - type: nauc_mrr_at_10_std value: -12.592883550594735 - type: nauc_mrr_at_1_diff1 value: 19.59282927806732 - type: nauc_mrr_at_1_max value: -1.1271716729359413 - type: nauc_mrr_at_1_std value: -11.710668880297517 - type: nauc_mrr_at_20_diff1 value: 16.86673477981559 - type: nauc_mrr_at_20_max value: 0.6897167399764257 - type: nauc_mrr_at_20_std value: -12.464631471378414 - type: nauc_mrr_at_3_diff1 value: 17.0481261621288 - type: nauc_mrr_at_3_max value: 0.7183007174016199 - type: nauc_mrr_at_3_std value: -12.329335728574527 - type: nauc_mrr_at_5_diff1 value: 16.698916629443854 - type: nauc_mrr_at_5_max value: 1.2515514207224299 - type: nauc_mrr_at_5_std value: -11.662599392805308 - type: nauc_ndcg_at_1000_diff1 value: 19.30605856078901 - type: nauc_ndcg_at_1000_max value: 2.3402231520806835 - type: nauc_ndcg_at_1000_std value: -12.370409989770332 - type: nauc_ndcg_at_100_diff1 value: 19.31155460872256 - type: nauc_ndcg_at_100_max value: 2.510633162779702 - type: nauc_ndcg_at_100_std value: -12.313796276064673 - type: nauc_ndcg_at_10_diff1 value: 18.511651466450843 - type: nauc_ndcg_at_10_max value: 2.6756675185155263 - type: nauc_ndcg_at_10_std value: -13.573610085360095 - type: nauc_ndcg_at_1_diff1 value: 22.704520355653777 - type: nauc_ndcg_at_1_max value: -0.7340073588952427 - type: nauc_ndcg_at_1_std value: -11.685082615631233 - type: nauc_ndcg_at_20_diff1 value: 19.01305812933961 - type: nauc_ndcg_at_20_max value: 2.777977280012548 - type: nauc_ndcg_at_20_std value: -12.959515013552128 - type: nauc_ndcg_at_3_diff1 value: 19.15053976740578 - type: nauc_ndcg_at_3_max value: 3.2587972262385496 - type: nauc_ndcg_at_3_std value: -12.105808757691328 - type: nauc_ndcg_at_5_diff1 value: 18.010082675090597 - type: nauc_ndcg_at_5_max value: 3.753876824229378 - type: nauc_ndcg_at_5_std value: -11.044202434548701 - type: nauc_precision_at_1000_diff1 value: -11.75783343822487 - type: nauc_precision_at_1000_max value: 5.7856460776313465 - type: nauc_precision_at_1000_std value: 62.79171280927037 - type: nauc_precision_at_100_diff1 value: 9.08527555500537 - type: nauc_precision_at_100_max value: 36.16754653078746 - type: nauc_precision_at_100_std value: 28.37969482833522 - type: nauc_precision_at_10_diff1 
value: 10.685081888632977 - type: nauc_precision_at_10_max value: 7.185779514361452 - type: nauc_precision_at_10_std value: -22.209758078034394 - type: nauc_precision_at_1_diff1 value: 22.704520355653777 - type: nauc_precision_at_1_max value: -0.7340073588952427 - type: nauc_precision_at_1_std value: -11.685082615631233 - type: nauc_precision_at_20_diff1 value: 10.0745772945806 - type: nauc_precision_at_20_max value: 16.81469938479116 - type: nauc_precision_at_20_std value: -22.804277740935298 - type: nauc_precision_at_3_diff1 value: 16.900587067301714 - type: nauc_precision_at_3_max value: 6.595958907337978 - type: nauc_precision_at_3_std value: -11.888316132805594 - type: nauc_precision_at_5_diff1 value: 12.771428972972895 - type: nauc_precision_at_5_max value: 8.79201485711544 - type: nauc_precision_at_5_std value: -8.609881800940762 - type: nauc_recall_at_1000_diff1 value: -11.757833438225305 - type: nauc_recall_at_1000_max value: 5.785646077628613 - type: nauc_recall_at_1000_std value: 62.791712809264176 - type: nauc_recall_at_100_diff1 value: 9.085275555005722 - type: nauc_recall_at_100_max value: 36.167546530787995 - type: nauc_recall_at_100_std value: 28.37969482833511 - type: nauc_recall_at_10_diff1 value: 10.68508188863288 - type: nauc_recall_at_10_max value: 7.185779514361484 - type: nauc_recall_at_10_std value: -22.209758078034465 - type: nauc_recall_at_1_diff1 value: 22.704520355653777 - type: nauc_recall_at_1_max value: -0.7340073588952427 - type: nauc_recall_at_1_std value: -11.685082615631233 - type: nauc_recall_at_20_diff1 value: 10.074577294581067 - type: nauc_recall_at_20_max value: 16.814699384791545 - type: nauc_recall_at_20_std value: -22.80427774093497 - type: nauc_recall_at_3_diff1 value: 16.900587067301768 - type: nauc_recall_at_3_max value: 6.595958907337955 - type: nauc_recall_at_3_std value: -11.888316132805613 - type: nauc_recall_at_5_diff1 value: 12.77142897297289 - type: nauc_recall_at_5_max value: 8.792014857115413 - type: nauc_recall_at_5_std value: -8.609881800940697 - type: ndcg_at_1 value: 35.135 - type: ndcg_at_10 value: 59.71300000000001 - type: ndcg_at_100 value: 62.5 - type: ndcg_at_1000 value: 62.578 - type: ndcg_at_20 value: 61.775000000000006 - type: ndcg_at_3 value: 50.336999999999996 - type: ndcg_at_5 value: 54.748 - type: precision_at_1 value: 35.135 - type: precision_at_10 value: 8.72 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 4.765 - type: precision_at_3 value: 20.413 - type: precision_at_5 value: 14.381 - type: recall_at_1 value: 35.135 - type: recall_at_10 value: 87.198 - type: recall_at_100 value: 99.075 - type: recall_at_1000 value: 99.644 - type: recall_at_20 value: 95.306 - type: recall_at_3 value: 61.23800000000001 - type: recall_at_5 value: 71.906 task: type: Retrieval - dataset: config: default name: MTEB CBD revision: 36ddb419bcffe6a5374c3891957912892916f28d split: test type: PL-MTEB/cbd metrics: - type: accuracy value: 84.13000000000001 - type: ap value: 38.21674564144456 - type: ap_weighted value: 38.21674564144456 - type: f1 value: 73.58128735002478 - type: f1_weighted value: 85.75596717538494 - type: main_score value: 84.13000000000001 task: type: Classification - dataset: config: default name: MTEB CDSC-E revision: 0a3d4aa409b22f80eb22cbf59b492637637b536d split: test type: PL-MTEB/cdsce-pairclassification metrics: - type: cosine_accuracy value: 89.0 - type: cosine_accuracy_threshold value: 95.30268088769837 - type: cosine_ap value: 78.23422403821777 - type: 
cosine_f1 value: 69.23076923076923 - type: cosine_f1_threshold value: 87.1877340095262 - type: cosine_precision value: 67.5 - type: cosine_recall value: 71.05263157894737 - type: dot_accuracy value: 88.3 - type: dot_accuracy_threshold value: 2472000.0 - type: dot_ap value: 74.26705897704197 - type: dot_f1 value: 66.49874055415617 - type: dot_f1_threshold value: 2316800.0 - type: dot_precision value: 63.76811594202898 - type: dot_recall value: 69.47368421052632 - type: euclidean_accuracy value: 89.2 - type: euclidean_accuracy_threshold value: 6878.705188647788 - type: euclidean_ap value: 78.51718555534579 - type: euclidean_f1 value: 69.54314720812182 - type: euclidean_f1_threshold value: 8323.035838252725 - type: euclidean_precision value: 67.15686274509804 - type: euclidean_recall value: 72.10526315789474 - type: main_score value: 78.51718555534579 - type: manhattan_accuracy value: 89.2 - type: manhattan_accuracy_threshold value: 326812.48528957367 - type: manhattan_ap value: 78.50895632545628 - type: manhattan_f1 value: 69.84924623115577 - type: manhattan_f1_threshold value: 398102.616417408 - type: manhattan_precision value: 66.82692307692307 - type: manhattan_recall value: 73.15789473684211 - type: max_ap value: 78.51718555534579 - type: max_f1 value: 69.84924623115577 - type: max_precision value: 67.5 - type: max_recall value: 73.15789473684211 - type: similarity_accuracy value: 89.0 - type: similarity_accuracy_threshold value: 95.30268088769837 - type: similarity_ap value: 78.23422403821777 - type: similarity_f1 value: 69.23076923076923 - type: similarity_f1_threshold value: 87.1877340095262 - type: similarity_precision value: 67.5 - type: similarity_recall value: 71.05263157894737 task: type: PairClassification - dataset: config: default name: MTEB CDSC-R revision: 1cd6abbb00df7d14be3dbd76a7dcc64b3a79a7cd split: test type: PL-MTEB/cdscr-sts metrics: - type: cosine_pearson value: 91.04238667979497 - type: cosine_spearman value: 90.96758456402505 - type: euclidean_pearson value: 88.88396869759062 - type: euclidean_spearman value: 90.80235709678217 - type: main_score value: 90.96758456402505 - type: manhattan_pearson value: 88.91331977492183 - type: manhattan_spearman value: 90.82823486754444 - type: pearson value: 91.04238667979497 - type: spearman value: 90.96758456402505 task: type: STS - dataset: config: default name: MTEB DBPedia-PL revision: 76afe41d9af165cc40999fcaa92312b8b012064a split: test type: clarin-knext/dbpedia-pl metrics: - type: main_score value: 43.189 - type: map_at_1 value: 8.838 - type: map_at_10 value: 20.335 - type: map_at_100 value: 29.818 - type: map_at_1000 value: 31.672 - type: map_at_20 value: 24.037 - type: map_at_3 value: 14.144000000000002 - type: map_at_5 value: 16.674 - type: mrr_at_1 value: 66.25 - type: mrr_at_10 value: 74.51428571428573 - type: mrr_at_100 value: 74.85025528596333 - type: mrr_at_1000 value: 74.861579760375 - type: mrr_at_20 value: 74.75227906231197 - type: mrr_at_3 value: 73.25 - type: mrr_at_5 value: 73.825 - type: nauc_map_at_1000_diff1 value: 25.397956304548963 - type: nauc_map_at_1000_max value: 34.60045634629073 - type: nauc_map_at_1000_std value: 25.484338507029523 - type: nauc_map_at_100_diff1 value: 26.732402811074362 - type: nauc_map_at_100_max value: 33.16273154550298 - type: nauc_map_at_100_std value: 22.705558316419694 - type: nauc_map_at_10_diff1 value: 31.048350740517666 - type: nauc_map_at_10_max value: 20.58247280790142 - type: nauc_map_at_10_std value: -0.3057740988996755 - type: nauc_map_at_1_diff1 value: 
37.44384898753489 - type: nauc_map_at_1_max value: 2.009066872007797 - type: nauc_map_at_1_std value: -18.38972044447374 - type: nauc_map_at_20_diff1 value: 29.145950023489974 - type: nauc_map_at_20_max value: 25.337239700245075 - type: nauc_map_at_20_std value: 7.680343084384305 - type: nauc_map_at_3_diff1 value: 32.41886776815376 - type: nauc_map_at_3_max value: 8.976460728750666 - type: nauc_map_at_3_std value: -14.206927116348458 - type: nauc_map_at_5_diff1 value: 31.316919153957873 - type: nauc_map_at_5_max value: 14.015365438005226 - type: nauc_map_at_5_std value: -8.909007562143335 - type: nauc_mrr_at_1000_diff1 value: 42.77521158292109 - type: nauc_mrr_at_1000_max value: 58.03733674934908 - type: nauc_mrr_at_1000_std value: 42.65118460573791 - type: nauc_mrr_at_100_diff1 value: 42.76917109803571 - type: nauc_mrr_at_100_max value: 58.04747433083853 - type: nauc_mrr_at_100_std value: 42.65151388365855 - type: nauc_mrr_at_10_diff1 value: 42.4992726119988 - type: nauc_mrr_at_10_max value: 58.157080658302974 - type: nauc_mrr_at_10_std value: 42.98778606676595 - type: nauc_mrr_at_1_diff1 value: 46.67764597969527 - type: nauc_mrr_at_1_max value: 54.52896662427813 - type: nauc_mrr_at_1_std value: 35.71181387979735 - type: nauc_mrr_at_20_diff1 value: 42.79101300218034 - type: nauc_mrr_at_20_max value: 58.05679669975563 - type: nauc_mrr_at_20_std value: 42.72288886007032 - type: nauc_mrr_at_3_diff1 value: 41.85440967628899 - type: nauc_mrr_at_3_max value: 57.975577899726126 - type: nauc_mrr_at_3_std value: 43.523432037784985 - type: nauc_mrr_at_5_diff1 value: 42.3041465494315 - type: nauc_mrr_at_5_max value: 58.54530113479029 - type: nauc_mrr_at_5_std value: 43.2944834223015 - type: nauc_ndcg_at_1000_diff1 value: 32.16216922989725 - type: nauc_ndcg_at_1000_max value: 50.03467332768009 - type: nauc_ndcg_at_1000_std value: 42.87877265207483 - type: nauc_ndcg_at_100_diff1 value: 33.55193527551313 - type: nauc_ndcg_at_100_max value: 45.12048953873363 - type: nauc_ndcg_at_100_std value: 34.788021436199024 - type: nauc_ndcg_at_10_diff1 value: 31.14168233882658 - type: nauc_ndcg_at_10_max value: 45.31079148382448 - type: nauc_ndcg_at_10_std value: 28.555214349385466 - type: nauc_ndcg_at_1_diff1 value: 45.12481069889602 - type: nauc_ndcg_at_1_max value: 45.93377570654117 - type: nauc_ndcg_at_1_std value: 26.672617000885186 - type: nauc_ndcg_at_20_diff1 value: 31.81216979830056 - type: nauc_ndcg_at_20_max value: 41.93464767693644 - type: nauc_ndcg_at_20_std value: 26.08707327004535 - type: nauc_ndcg_at_3_diff1 value: 29.90627202771331 - type: nauc_ndcg_at_3_max value: 46.50414958925517 - type: nauc_ndcg_at_3_std value: 29.66009841753563 - type: nauc_ndcg_at_5_diff1 value: 29.08122779713697 - type: nauc_ndcg_at_5_max value: 46.81499760516951 - type: nauc_ndcg_at_5_std value: 29.935930977468267 - type: nauc_precision_at_1000_diff1 value: -18.71150014402453 - type: nauc_precision_at_1000_max value: -0.9220395765472844 - type: nauc_precision_at_1000_std value: 7.219897945975822 - type: nauc_precision_at_100_diff1 value: -8.609528664023014 - type: nauc_precision_at_100_max value: 29.147048677242864 - type: nauc_precision_at_100_std value: 44.958041507680036 - type: nauc_precision_at_10_diff1 value: 2.8689201908213477 - type: nauc_precision_at_10_max value: 44.40893361361308 - type: nauc_precision_at_10_std value: 47.18569807586499 - type: nauc_precision_at_1_diff1 value: 46.01228536231763 - type: nauc_precision_at_1_max value: 54.30280987857099 - type: nauc_precision_at_1_std value: 36.923128493492776 - 
type: nauc_precision_at_20_diff1 value: -1.9783515948740122 - type: nauc_precision_at_20_max value: 38.42066921295958 - type: nauc_precision_at_20_std value: 47.41935674153161 - type: nauc_precision_at_3_diff1 value: 9.877584475384026 - type: nauc_precision_at_3_max value: 44.77006526403546 - type: nauc_precision_at_3_std value: 39.51299545977156 - type: nauc_precision_at_5_diff1 value: 5.096217475317008 - type: nauc_precision_at_5_max value: 45.66716959157208 - type: nauc_precision_at_5_std value: 42.651208343259505 - type: nauc_recall_at_1000_diff1 value: 25.395292649442965 - type: nauc_recall_at_1000_max value: 44.94193476114992 - type: nauc_recall_at_1000_std value: 53.58345238223027 - type: nauc_recall_at_100_diff1 value: 23.962022146293293 - type: nauc_recall_at_100_max value: 32.15140842028602 - type: nauc_recall_at_100_std value: 30.57126984952762 - type: nauc_recall_at_10_diff1 value: 28.120539807446004 - type: nauc_recall_at_10_max value: 18.154834280193572 - type: nauc_recall_at_10_std value: -0.6032386653260938 - type: nauc_recall_at_1_diff1 value: 37.44384898753489 - type: nauc_recall_at_1_max value: 2.009066872007797 - type: nauc_recall_at_1_std value: -18.38972044447374 - type: nauc_recall_at_20_diff1 value: 23.438945970294554 - type: nauc_recall_at_20_max value: 17.201259624644326 - type: nauc_recall_at_20_std value: 3.75587033487961 - type: nauc_recall_at_3_diff1 value: 29.867460507200587 - type: nauc_recall_at_3_max value: 8.066960542463528 - type: nauc_recall_at_3_std value: -15.13440571172203 - type: nauc_recall_at_5_diff1 value: 28.657118879661887 - type: nauc_recall_at_5_max value: 12.942552735963842 - type: nauc_recall_at_5_std value: -9.57735672972808 - type: ndcg_at_1 value: 54.50000000000001 - type: ndcg_at_10 value: 43.189 - type: ndcg_at_100 value: 48.595 - type: ndcg_at_1000 value: 55.681000000000004 - type: ndcg_at_20 value: 43.09 - type: ndcg_at_3 value: 47.599000000000004 - type: ndcg_at_5 value: 44.907000000000004 - type: precision_at_1 value: 66.5 - type: precision_at_10 value: 35.725 - type: precision_at_100 value: 11.583 - type: precision_at_1000 value: 2.302 - type: precision_at_20 value: 27.375 - type: precision_at_3 value: 52.0 - type: precision_at_5 value: 44.7 - type: recall_at_1 value: 8.838 - type: recall_at_10 value: 25.424999999999997 - type: recall_at_100 value: 55.632000000000005 - type: recall_at_1000 value: 77.857 - type: recall_at_20 value: 34.458 - type: recall_at_3 value: 15.229999999999999 - type: recall_at_5 value: 18.872 task: type: Retrieval - dataset: config: default name: MTEB 8TagsClustering revision: None split: test type: PL-MTEB/8tags-clustering metrics: - type: main_score value: 50.28804848851286 - type: v_measure value: 50.28804848851286 - type: v_measure_std value: 2.9879120747919505 task: type: Clustering - dataset: config: default name: MTEB FiQA-PL revision: 2e535829717f8bf9dc829b7f911cc5bbd4e6608e split: test type: clarin-knext/fiqa-pl metrics: - type: main_score value: 46.121 - type: map_at_1 value: 24.027 - type: map_at_10 value: 38.14 - type: map_at_100 value: 40.092 - type: map_at_1000 value: 40.266000000000005 - type: map_at_20 value: 39.195 - type: map_at_3 value: 33.415 - type: map_at_5 value: 36.115 - type: mrr_at_1 value: 46.60493827160494 - type: mrr_at_10 value: 54.70305457573974 - type: mrr_at_100 value: 55.355642920233414 - type: mrr_at_1000 value: 55.3908291424442 - type: mrr_at_20 value: 55.00793641725012 - type: mrr_at_3 value: 52.3148148148148 - type: mrr_at_5 value: 53.54166666666664 - type: 
nauc_map_at_1000_diff1 value: 37.73510043188139 - type: nauc_map_at_1000_max value: 28.32920495001755 - type: nauc_map_at_1000_std value: 2.1388839190211293 - type: nauc_map_at_100_diff1 value: 37.670108404247685 - type: nauc_map_at_100_max value: 28.227406812543826 - type: nauc_map_at_100_std value: 2.120931632442644 - type: nauc_map_at_10_diff1 value: 37.465256098544174 - type: nauc_map_at_10_max value: 27.091226456549666 - type: nauc_map_at_10_std value: 1.1173775566235409 - type: nauc_map_at_1_diff1 value: 41.23855326212752 - type: nauc_map_at_1_max value: 21.290748552864557 - type: nauc_map_at_1_std value: -0.8385928448565472 - type: nauc_map_at_20_diff1 value: 37.47054494805535 - type: nauc_map_at_20_max value: 27.729045702955386 - type: nauc_map_at_20_std value: 1.7216485460777051 - type: nauc_map_at_3_diff1 value: 37.262641031829105 - type: nauc_map_at_3_max value: 23.89124216989901 - type: nauc_map_at_3_std value: -0.14736489529369678 - type: nauc_map_at_5_diff1 value: 37.054030521972926 - type: nauc_map_at_5_max value: 25.37485175729055 - type: nauc_map_at_5_std value: 0.1603899014557275 - type: nauc_mrr_at_1000_diff1 value: 45.74249029214392 - type: nauc_mrr_at_1000_max value: 36.07619933100338 - type: nauc_mrr_at_1000_std value: 4.393752835100674 - type: nauc_mrr_at_100_diff1 value: 45.72338919745602 - type: nauc_mrr_at_100_max value: 36.07500193737586 - type: nauc_mrr_at_100_std value: 4.415904610787372 - type: nauc_mrr_at_10_diff1 value: 45.712821401955814 - type: nauc_mrr_at_10_max value: 36.077633940467855 - type: nauc_mrr_at_10_std value: 4.31515612100577 - type: nauc_mrr_at_1_diff1 value: 48.95197646135339 - type: nauc_mrr_at_1_max value: 37.627960253727124 - type: nauc_mrr_at_1_std value: 4.355410396712492 - type: nauc_mrr_at_20_diff1 value: 45.657031672968316 - type: nauc_mrr_at_20_max value: 36.02034080808377 - type: nauc_mrr_at_20_std value: 4.291569107759258 - type: nauc_mrr_at_3_diff1 value: 46.14016248486381 - type: nauc_mrr_at_3_max value: 35.096997959937816 - type: nauc_mrr_at_3_std value: 3.473234729162835 - type: nauc_mrr_at_5_diff1 value: 46.044456362138746 - type: nauc_mrr_at_5_max value: 35.54259698630834 - type: nauc_mrr_at_5_std value: 3.242035621890524 - type: nauc_ndcg_at_1000_diff1 value: 39.37342092420808 - type: nauc_ndcg_at_1000_max value: 32.34854163612446 - type: nauc_ndcg_at_1000_std value: 4.9764682793258865 - type: nauc_ndcg_at_100_diff1 value: 38.396532780365966 - type: nauc_ndcg_at_100_max value: 31.427345966345072 - type: nauc_ndcg_at_100_std value: 5.436384757156155 - type: nauc_ndcg_at_10_diff1 value: 38.33852883060773 - type: nauc_ndcg_at_10_max value: 29.405844267873825 - type: nauc_ndcg_at_10_std value: 2.9724473995284453 - type: nauc_ndcg_at_1_diff1 value: 49.360894087944914 - type: nauc_ndcg_at_1_max value: 37.10711812240423 - type: nauc_ndcg_at_1_std value: 3.8523559329866988 - type: nauc_ndcg_at_20_diff1 value: 38.050204646363945 - type: nauc_ndcg_at_20_max value: 29.935603389108866 - type: nauc_ndcg_at_20_std value: 3.779925764680313 - type: nauc_ndcg_at_3_diff1 value: 39.4668764835337 - type: nauc_ndcg_at_3_max value: 30.65976708125836 - type: nauc_ndcg_at_3_std value: 1.2337033504877237 - type: nauc_ndcg_at_5_diff1 value: 38.86503445443355 - type: nauc_ndcg_at_5_max value: 29.0023578220992 - type: nauc_ndcg_at_5_std value: 0.8206100069462643 - type: nauc_precision_at_1000_diff1 value: 5.84775168273073 - type: nauc_precision_at_1000_max value: 27.58660371315182 - type: nauc_precision_at_1000_std value: 9.028324162807364 - type: 
nauc_precision_at_100_diff1 value: 10.655637431827838 - type: nauc_precision_at_100_max value: 32.11889757111383 - type: nauc_precision_at_100_std value: 13.051376462007925 - type: nauc_precision_at_10_diff1 value: 20.55227291550576 - type: nauc_precision_at_10_max value: 34.48969436232284 - type: nauc_precision_at_10_std value: 7.57890876950882 - type: nauc_precision_at_1_diff1 value: 49.360894087944914 - type: nauc_precision_at_1_max value: 37.10711812240423 - type: nauc_precision_at_1_std value: 3.8523559329866988 - type: nauc_precision_at_20_diff1 value: 16.62880025315897 - type: nauc_precision_at_20_max value: 34.15703662717139 - type: nauc_precision_at_20_std value: 10.909431920732883 - type: nauc_precision_at_3_diff1 value: 28.04332082306772 - type: nauc_precision_at_3_max value: 31.009374202971753 - type: nauc_precision_at_3_std value: 2.307756409916575 - type: nauc_precision_at_5_diff1 value: 24.824270715808705 - type: nauc_precision_at_5_max value: 31.644036540931886 - type: nauc_precision_at_5_std value: 2.958068954639614 - type: nauc_recall_at_1000_diff1 value: 23.79234063489045 - type: nauc_recall_at_1000_max value: 26.76365425679858 - type: nauc_recall_at_1000_std value: 23.815318997671913 - type: nauc_recall_at_100_diff1 value: 22.399781833514737 - type: nauc_recall_at_100_max value: 23.192360958839174 - type: nauc_recall_at_100_std value: 15.984687692762742 - type: nauc_recall_at_10_diff1 value: 28.512649044683837 - type: nauc_recall_at_10_max value: 22.77819651497193 - type: nauc_recall_at_10_std value: 4.646633382718951 - type: nauc_recall_at_1_diff1 value: 41.23855326212752 - type: nauc_recall_at_1_max value: 21.290748552864557 - type: nauc_recall_at_1_std value: -0.8385928448565472 - type: nauc_recall_at_20_diff1 value: 26.797853661700632 - type: nauc_recall_at_20_max value: 21.9956231017133 - type: nauc_recall_at_20_std value: 5.664775183514371 - type: nauc_recall_at_3_diff1 value: 31.42511076281081 - type: nauc_recall_at_3_max value: 19.459398184547652 - type: nauc_recall_at_3_std value: -0.8592886454260257 - type: nauc_recall_at_5_diff1 value: 29.62950699804912 - type: nauc_recall_at_5_max value: 19.941323519486684 - type: nauc_recall_at_5_std value: -0.45387351120880465 - type: ndcg_at_1 value: 46.451 - type: ndcg_at_10 value: 46.121 - type: ndcg_at_100 value: 52.830999999999996 - type: ndcg_at_1000 value: 55.557 - type: ndcg_at_20 value: 48.535000000000004 - type: ndcg_at_3 value: 42.178 - type: ndcg_at_5 value: 43.406 - type: precision_at_1 value: 46.451 - type: precision_at_10 value: 12.562000000000001 - type: precision_at_100 value: 1.963 - type: precision_at_1000 value: 0.244 - type: precision_at_20 value: 7.392 - type: precision_at_3 value: 27.572000000000003 - type: precision_at_5 value: 20.031 - type: recall_at_1 value: 24.027 - type: recall_at_10 value: 52.61900000000001 - type: recall_at_100 value: 77.491 - type: recall_at_1000 value: 93.55 - type: recall_at_20 value: 59.745000000000005 - type: recall_at_3 value: 37.765 - type: recall_at_5 value: 44.304 task: type: Retrieval - dataset: config: default name: MTEB HotpotQA-PL revision: a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907 split: test type: clarin-knext/hotpotqa-pl metrics: - type: main_score value: 77.02799999999999 - type: map_at_1 value: 41.249 - type: map_at_10 value: 69.512 - type: map_at_100 value: 70.291 - type: map_at_1000 value: 70.334 - type: map_at_20 value: 69.992 - type: map_at_3 value: 65.751 - type: map_at_5 value: 68.161 - type: mrr_at_1 value: 82.4983119513842 - type: mrr_at_10 value: 
87.71202426502866 - type: mrr_at_100 value: 87.84265780907221 - type: mrr_at_1000 value: 87.8455843626266 - type: mrr_at_20 value: 87.80640011547308 - type: mrr_at_3 value: 86.94575737114536 - type: mrr_at_5 value: 87.46770200315063 - type: nauc_map_at_1000_diff1 value: 17.17119899625707 - type: nauc_map_at_1000_max value: 29.981569339485393 - type: nauc_map_at_1000_std value: 8.93659568948167 - type: nauc_map_at_100_diff1 value: 17.156175947340035 - type: nauc_map_at_100_max value: 29.988121004348194 - type: nauc_map_at_100_std value: 8.967947232110745 - type: nauc_map_at_10_diff1 value: 16.854416108818132 - type: nauc_map_at_10_max value: 29.784211249360194 - type: nauc_map_at_10_std value: 8.535227936720936 - type: nauc_map_at_1_diff1 value: 68.01294545515707 - type: nauc_map_at_1_max value: 47.51019900345037 - type: nauc_map_at_1_std value: -1.7951406243808212 - type: nauc_map_at_20_diff1 value: 16.993955459776572 - type: nauc_map_at_20_max value: 29.920806300647463 - type: nauc_map_at_20_std value: 8.873597327714583 - type: nauc_map_at_3_diff1 value: 16.16514623575243 - type: nauc_map_at_3_max value: 27.62371849413713 - type: nauc_map_at_3_std value: 5.131406130565191 - type: nauc_map_at_5_diff1 value: 16.507863832657364 - type: nauc_map_at_5_max value: 28.9019090072195 - type: nauc_map_at_5_std value: 7.2380930617814645 - type: nauc_mrr_at_1000_diff1 value: 66.74502991743417 - type: nauc_mrr_at_1000_max value: 50.29274140603486 - type: nauc_mrr_at_1000_std value: 1.602388931386098 - type: nauc_mrr_at_100_diff1 value: 66.7413605208101 - type: nauc_mrr_at_100_max value: 50.29720043419606 - type: nauc_mrr_at_100_std value: 1.612142495535232 - type: nauc_mrr_at_10_diff1 value: 66.71814591414376 - type: nauc_mrr_at_10_max value: 50.39851050116519 - type: nauc_mrr_at_10_std value: 1.7339878916186384 - type: nauc_mrr_at_1_diff1 value: 68.01294545515707 - type: nauc_mrr_at_1_max value: 47.627701029006225 - type: nauc_mrr_at_1_std value: -1.442043059079073 - type: nauc_mrr_at_20_diff1 value: 66.72944815863312 - type: nauc_mrr_at_20_max value: 50.325719646409716 - type: nauc_mrr_at_20_std value: 1.6584317196476688 - type: nauc_mrr_at_3_diff1 value: 66.29662294615758 - type: nauc_mrr_at_3_max value: 50.29363488669571 - type: nauc_mrr_at_3_std value: 1.1373012069481296 - type: nauc_mrr_at_5_diff1 value: 66.70959181668684 - type: nauc_mrr_at_5_max value: 50.42831108375743 - type: nauc_mrr_at_5_std value: 1.5492429855609648 - type: nauc_ndcg_at_1000_diff1 value: 24.337157353044912 - type: nauc_ndcg_at_1000_max value: 35.021784629126984 - type: nauc_ndcg_at_1000_std value: 11.976738067383161 - type: nauc_ndcg_at_100_diff1 value: 23.584427352691776 - type: nauc_ndcg_at_100_max value: 35.12304754035805 - type: nauc_ndcg_at_100_std value: 12.921291623167921 - type: nauc_ndcg_at_10_diff1 value: 22.057127915032765 - type: nauc_ndcg_at_10_max value: 34.09397142140321 - type: nauc_ndcg_at_10_std value: 11.21339882108658 - type: nauc_ndcg_at_1_diff1 value: 68.01294545515707 - type: nauc_ndcg_at_1_max value: 47.51019900345037 - type: nauc_ndcg_at_1_std value: -1.7951406243808212 - type: nauc_ndcg_at_20_diff1 value: 22.404347553479102 - type: nauc_ndcg_at_20_max value: 34.50508324969608 - type: nauc_ndcg_at_20_std value: 12.281993331498175 - type: nauc_ndcg_at_3_diff1 value: 21.21895220595676 - type: nauc_ndcg_at_3_max value: 30.76465236403928 - type: nauc_ndcg_at_3_std value: 5.501903724385424 - type: nauc_ndcg_at_5_diff1 value: 21.489825424548258 - type: nauc_ndcg_at_5_max value: 32.43517409935615 - type: 
nauc_ndcg_at_5_std value: 8.59021290966302 - type: nauc_precision_at_1000_diff1 value: 9.056916578488696 - type: nauc_precision_at_1000_max value: 47.29861770129213 - type: nauc_precision_at_1000_std value: 60.06028316961357 - type: nauc_precision_at_100_diff1 value: 6.853208191063939 - type: nauc_precision_at_100_max value: 40.23686318254916 - type: nauc_precision_at_100_std value: 44.69884156134862 - type: nauc_precision_at_10_diff1 value: 7.7572606953149315 - type: nauc_precision_at_10_max value: 33.24412509121427 - type: nauc_precision_at_10_std value: 22.894891705425753 - type: nauc_precision_at_1_diff1 value: 68.01294545515707 - type: nauc_precision_at_1_max value: 47.51019900345037 - type: nauc_precision_at_1_std value: -1.7951406243808212 - type: nauc_precision_at_20_diff1 value: 6.102789021481188 - type: nauc_precision_at_20_max value: 34.384739158981084 - type: nauc_precision_at_20_std value: 29.40165302735249 - type: nauc_precision_at_3_diff1 value: 10.004182813463276 - type: nauc_precision_at_3_max value: 27.07527926636925 - type: nauc_precision_at_3_std value: 8.034252288165805 - type: nauc_precision_at_5_diff1 value: 8.672082689816547 - type: nauc_precision_at_5_max value: 29.352582129843867 - type: nauc_precision_at_5_std value: 14.456464951944461 - type: nauc_recall_at_1000_diff1 value: 9.056916578488018 - type: nauc_recall_at_1000_max value: 47.29861770129215 - type: nauc_recall_at_1000_std value: 60.06028316961315 - type: nauc_recall_at_100_diff1 value: 6.853208191063934 - type: nauc_recall_at_100_max value: 40.23686318254888 - type: nauc_recall_at_100_std value: 44.698841561348615 - type: nauc_recall_at_10_diff1 value: 7.7572606953149394 - type: nauc_recall_at_10_max value: 33.244125091214286 - type: nauc_recall_at_10_std value: 22.894891705425863 - type: nauc_recall_at_1_diff1 value: 68.01294545515707 - type: nauc_recall_at_1_max value: 47.51019900345037 - type: nauc_recall_at_1_std value: -1.7951406243808212 - type: nauc_recall_at_20_diff1 value: 6.102789021481126 - type: nauc_recall_at_20_max value: 34.38473915898118 - type: nauc_recall_at_20_std value: 29.40165302735251 - type: nauc_recall_at_3_diff1 value: 10.004182813463203 - type: nauc_recall_at_3_max value: 27.07527926636916 - type: nauc_recall_at_3_std value: 8.034252288165728 - type: nauc_recall_at_5_diff1 value: 8.672082689816364 - type: nauc_recall_at_5_max value: 29.352582129843714 - type: nauc_recall_at_5_std value: 14.4564649519445 - type: ndcg_at_1 value: 82.498 - type: ndcg_at_10 value: 77.02799999999999 - type: ndcg_at_100 value: 79.593 - type: ndcg_at_1000 value: 80.372 - type: ndcg_at_20 value: 78.194 - type: ndcg_at_3 value: 71.932 - type: ndcg_at_5 value: 74.878 - type: precision_at_1 value: 82.498 - type: precision_at_10 value: 16.289 - type: precision_at_100 value: 1.8259999999999998 - type: precision_at_1000 value: 0.193 - type: precision_at_20 value: 8.519 - type: precision_at_3 value: 46.851 - type: precision_at_5 value: 30.436000000000003 - type: recall_at_1 value: 41.249 - type: recall_at_10 value: 81.44500000000001 - type: recall_at_100 value: 91.323 - type: recall_at_1000 value: 96.44200000000001 - type: recall_at_20 value: 85.18599999999999 - type: recall_at_3 value: 70.277 - type: recall_at_5 value: 76.09 task: type: Retrieval - dataset: config: default name: MTEB MSMARCO-PL revision: 8634c07806d5cce3a6138e260e59b81760a0a640 split: test type: clarin-knext/msmarco-pl metrics: - type: main_score value: 72.695 - type: map_at_1 value: 2.313 - type: map_at_10 value: 16.541 - type: map_at_100 
value: 42.664 - type: map_at_1000 value: 51.048 - type: map_at_20 value: 25.691000000000003 - type: map_at_3 value: 6.8580000000000005 - type: map_at_5 value: 10.227 - type: mrr_at_1 value: 90.69767441860465 - type: mrr_at_10 value: 94.65116279069768 - type: mrr_at_100 value: 94.65116279069768 - type: mrr_at_1000 value: 94.65116279069768 - type: mrr_at_20 value: 94.65116279069768 - type: mrr_at_3 value: 94.18604651162791 - type: mrr_at_5 value: 94.65116279069768 - type: nauc_map_at_1000_diff1 value: -19.394271777832838 - type: nauc_map_at_1000_max value: 35.63073356621754 - type: nauc_map_at_1000_std value: 56.92803671553409 - type: nauc_map_at_100_diff1 value: -7.023340458676494 - type: nauc_map_at_100_max value: 22.967662469404267 - type: nauc_map_at_100_std value: 28.64423344417142 - type: nauc_map_at_10_diff1 value: 18.22452762970126 - type: nauc_map_at_10_max value: 3.235969423980127 - type: nauc_map_at_10_std value: -11.528499499305529 - type: nauc_map_at_1_diff1 value: 17.90743559505749 - type: nauc_map_at_1_max value: -14.61627654448527 - type: nauc_map_at_1_std value: -24.262430292012667 - type: nauc_map_at_20_diff1 value: 14.96422992084746 - type: nauc_map_at_20_max value: 11.128128185086132 - type: nauc_map_at_20_std value: -0.4087236026844547 - type: nauc_map_at_3_diff1 value: 16.45733174189393 - type: nauc_map_at_3_max value: -14.88196784500194 - type: nauc_map_at_3_std value: -26.096323520383446 - type: nauc_map_at_5_diff1 value: 17.572159494245003 - type: nauc_map_at_5_max value: -11.206812710229503 - type: nauc_map_at_5_std value: -22.27070819579704 - type: nauc_mrr_at_1000_diff1 value: 33.66069097978205 - type: nauc_mrr_at_1000_max value: 43.87773602456895 - type: nauc_mrr_at_1000_std value: 52.33730714398662 - type: nauc_mrr_at_100_diff1 value: 33.66069097978205 - type: nauc_mrr_at_100_max value: 43.87773602456895 - type: nauc_mrr_at_100_std value: 52.33730714398662 - type: nauc_mrr_at_10_diff1 value: 33.66069097978205 - type: nauc_mrr_at_10_max value: 43.87773602456895 - type: nauc_mrr_at_10_std value: 52.33730714398662 - type: nauc_mrr_at_1_diff1 value: 23.709794626749783 - type: nauc_mrr_at_1_max value: 35.45939642825464 - type: nauc_mrr_at_1_std value: 45.18790321558505 - type: nauc_mrr_at_20_diff1 value: 33.66069097978205 - type: nauc_mrr_at_20_max value: 43.87773602456895 - type: nauc_mrr_at_20_std value: 52.33730714398662 - type: nauc_mrr_at_3_diff1 value: 38.96783570139972 - type: nauc_mrr_at_3_max value: 48.367517142603624 - type: nauc_mrr_at_3_std value: 56.15032257246786 - type: nauc_mrr_at_5_diff1 value: 33.66069097978205 - type: nauc_mrr_at_5_max value: 43.87773602456895 - type: nauc_mrr_at_5_std value: 52.33730714398662 - type: nauc_ndcg_at_1000_diff1 value: -8.409227649777549 - type: nauc_ndcg_at_1000_max value: 55.08579408014661 - type: nauc_ndcg_at_1000_std value: 64.71829411541155 - type: nauc_ndcg_at_100_diff1 value: -12.171382005828134 - type: nauc_ndcg_at_100_max value: 37.279599751187895 - type: nauc_ndcg_at_100_std value: 55.59571261330682 - type: nauc_ndcg_at_10_diff1 value: -4.2745893875224645 - type: nauc_ndcg_at_10_max value: 35.61094191299521 - type: nauc_ndcg_at_10_std value: 31.49122710738599 - type: nauc_ndcg_at_1_diff1 value: 34.77341575621081 - type: nauc_ndcg_at_1_max value: 18.418784098194983 - type: nauc_ndcg_at_1_std value: 3.6003144907881026 - type: nauc_ndcg_at_20_diff1 value: -16.937600290863816 - type: nauc_ndcg_at_20_max value: 28.731002593372718 - type: nauc_ndcg_at_20_std value: 40.140028262395546 - type: nauc_ndcg_at_3_diff1 
value: 21.008563623057892 - type: nauc_ndcg_at_3_max value: 32.092932411602945 - type: nauc_ndcg_at_3_std value: 7.783159518591246 - type: nauc_ndcg_at_5_diff1 value: 13.35248395075747 - type: nauc_ndcg_at_5_max value: 33.48637127489678 - type: nauc_ndcg_at_5_std value: 19.883656903878986 - type: nauc_precision_at_1000_diff1 value: -34.613170483366815 - type: nauc_precision_at_1000_max value: 14.178980568050093 - type: nauc_precision_at_1000_std value: 53.45813399059421 - type: nauc_precision_at_100_diff1 value: -40.67552345859168 - type: nauc_precision_at_100_max value: 23.091965607829138 - type: nauc_precision_at_100_std value: 62.39644907525577 - type: nauc_precision_at_10_diff1 value: -29.61210257317124 - type: nauc_precision_at_10_max value: 43.992102732918255 - type: nauc_precision_at_10_std value: 67.25524849542518 - type: nauc_precision_at_1_diff1 value: 23.709794626749783 - type: nauc_precision_at_1_max value: 35.45939642825464 - type: nauc_precision_at_1_std value: 45.18790321558505 - type: nauc_precision_at_20_diff1 value: -38.29110052486433 - type: nauc_precision_at_20_max value: 28.73705296191401 - type: nauc_precision_at_20_std value: 62.12026159344505 - type: nauc_precision_at_3_diff1 value: -4.950069185044093 - type: nauc_precision_at_3_max value: 35.30311413187648 - type: nauc_precision_at_3_std value: 37.24789627772557 - type: nauc_precision_at_5_diff1 value: -8.259725731846123 - type: nauc_precision_at_5_max value: 33.985287538899314 - type: nauc_precision_at_5_std value: 53.59550306044433 - type: nauc_recall_at_1000_diff1 value: -5.996961409631926 - type: nauc_recall_at_1000_max value: 63.118266233402764 - type: nauc_recall_at_1000_std value: 69.5649709802058 - type: nauc_recall_at_100_diff1 value: 6.920650261229799 - type: nauc_recall_at_100_max value: 26.76777278523633 - type: nauc_recall_at_100_std value: 24.81349844560708 - type: nauc_recall_at_10_diff1 value: 18.636579796911292 - type: nauc_recall_at_10_max value: 2.214374250576099 - type: nauc_recall_at_10_std value: -12.939953791707651 - type: nauc_recall_at_1_diff1 value: 17.90743559505749 - type: nauc_recall_at_1_max value: -14.61627654448527 - type: nauc_recall_at_1_std value: -24.262430292012667 - type: nauc_recall_at_20_diff1 value: 17.612041689452855 - type: nauc_recall_at_20_max value: 11.182632726686007 - type: nauc_recall_at_20_std value: -2.4835954401161864 - type: nauc_recall_at_3_diff1 value: 16.773341381117 - type: nauc_recall_at_3_max value: -15.051242807277163 - type: nauc_recall_at_3_std value: -26.410274593618038 - type: nauc_recall_at_5_diff1 value: 17.091861029537423 - type: nauc_recall_at_5_max value: -13.243464985211395 - type: nauc_recall_at_5_std value: -23.92982354951768 - type: ndcg_at_1 value: 78.295 - type: ndcg_at_10 value: 72.695 - type: ndcg_at_100 value: 65.69500000000001 - type: ndcg_at_1000 value: 73.359 - type: ndcg_at_20 value: 69.16499999999999 - type: ndcg_at_3 value: 76.632 - type: ndcg_at_5 value: 74.024 - type: precision_at_1 value: 90.69800000000001 - type: precision_at_10 value: 81.628 - type: precision_at_100 value: 38.116 - type: precision_at_1000 value: 7.199999999999999 - type: precision_at_20 value: 72.209 - type: precision_at_3 value: 89.922 - type: precision_at_5 value: 86.047 - type: recall_at_1 value: 2.313 - type: recall_at_10 value: 17.48 - type: recall_at_100 value: 53.937000000000005 - type: recall_at_1000 value: 80.018 - type: recall_at_20 value: 28.081 - type: recall_at_3 value: 6.927 - type: recall_at_5 value: 10.575 task: type: Retrieval - dataset: 
config: pl name: MTEB MassiveIntentClassification (pl) revision: 4672e20407010da34463acc759c162ca9734bca6 split: test type: mteb/amazon_massive_intent metrics: - type: accuracy value: 79.41492938802959 - type: f1 value: 75.75917683785259 - type: f1_weighted value: 79.4156392656699 - type: main_score value: 79.41492938802959 task: type: Classification - dataset: config: pl name: MTEB MassiveScenarioClassification (pl) revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 split: test type: mteb/amazon_massive_scenario metrics: - type: accuracy value: 81.9334229993275 - type: f1 value: 81.40628785444537 - type: f1_weighted value: 81.79807477693303 - type: main_score value: 81.9334229993275 task: type: Classification - dataset: config: default name: MTEB NFCorpus-PL revision: 9a6f9567fda928260afed2de480d79c98bf0bec0 split: test type: clarin-knext/nfcorpus-pl metrics: - type: main_score value: 36.723 - type: map_at_1 value: 5.8069999999999995 - type: map_at_10 value: 13.602 - type: map_at_100 value: 17.196 - type: map_at_1000 value: 18.609 - type: map_at_20 value: 15.146999999999998 - type: map_at_3 value: 9.594999999999999 - type: map_at_5 value: 11.453000000000001 - type: mrr_at_1 value: 47.368421052631575 - type: mrr_at_10 value: 55.60703228659884 - type: mrr_at_100 value: 56.1552975760445 - type: mrr_at_1000 value: 56.19164342988321 - type: mrr_at_20 value: 55.922507068281476 - type: mrr_at_3 value: 53.147574819401456 - type: mrr_at_5 value: 54.680082559339525 - type: nauc_map_at_1000_diff1 value: 34.05763404594125 - type: nauc_map_at_1000_max value: 29.5226776533209 - type: nauc_map_at_1000_std value: 15.427632324819914 - type: nauc_map_at_100_diff1 value: 34.80313586539057 - type: nauc_map_at_100_max value: 27.999543781245972 - type: nauc_map_at_100_std value: 11.502430185601197 - type: nauc_map_at_10_diff1 value: 39.10493763818235 - type: nauc_map_at_10_max value: 20.299110129894572 - type: nauc_map_at_10_std value: -1.8131312981171384 - type: nauc_map_at_1_diff1 value: 54.952292547558436 - type: nauc_map_at_1_max value: 13.172173380536137 - type: nauc_map_at_1_std value: -11.135859432447047 - type: nauc_map_at_20_diff1 value: 36.56338939350608 - type: nauc_map_at_20_max value: 24.057778180377355 - type: nauc_map_at_20_std value: 4.030543599731532 - type: nauc_map_at_3_diff1 value: 46.798195082350766 - type: nauc_map_at_3_max value: 14.899395608553915 - type: nauc_map_at_3_std value: -10.505614189182307 - type: nauc_map_at_5_diff1 value: 42.83953515294862 - type: nauc_map_at_5_max value: 17.04727497975375 - type: nauc_map_at_5_std value: -7.6517071380275885 - type: nauc_mrr_at_1000_diff1 value: 41.44193432540061 - type: nauc_mrr_at_1000_max value: 39.88086824180341 - type: nauc_mrr_at_1000_std value: 27.351885880283966 - type: nauc_mrr_at_100_diff1 value: 41.43357468563369 - type: nauc_mrr_at_100_max value: 39.91394628214467 - type: nauc_mrr_at_100_std value: 27.37166382203234 - type: nauc_mrr_at_10_diff1 value: 41.46082695650948 - type: nauc_mrr_at_10_max value: 39.858957188572944 - type: nauc_mrr_at_10_std value: 27.18216001182641 - type: nauc_mrr_at_1_diff1 value: 41.485448798176904 - type: nauc_mrr_at_1_max value: 33.6944538535235 - type: nauc_mrr_at_1_std value: 22.826701578387503 - type: nauc_mrr_at_20_diff1 value: 41.374365310091925 - type: nauc_mrr_at_20_max value: 39.923859616197035 - type: nauc_mrr_at_20_std value: 27.27268109687068 - type: nauc_mrr_at_3_diff1 value: 42.1244757279239 - type: nauc_mrr_at_3_max value: 38.380669877043864 - type: nauc_mrr_at_3_std value: 
25.734391560690224 - type: nauc_mrr_at_5_diff1 value: 41.26497822292423 - type: nauc_mrr_at_5_max value: 39.17164048501762 - type: nauc_mrr_at_5_std value: 26.304110615701987 - type: nauc_ndcg_at_1000_diff1 value: 31.76845316166595 - type: nauc_ndcg_at_1000_max value: 44.0530198648453 - type: nauc_ndcg_at_1000_std value: 33.37050209530549 - type: nauc_ndcg_at_100_diff1 value: 31.70167104254346 - type: nauc_ndcg_at_100_max value: 38.98577219865644 - type: nauc_ndcg_at_100_std value: 28.46948949404448 - type: nauc_ndcg_at_10_diff1 value: 31.41371490994258 - type: nauc_ndcg_at_10_max value: 36.46974014607837 - type: nauc_ndcg_at_10_std value: 28.214061102873274 - type: nauc_ndcg_at_1_diff1 value: 45.195218239572185 - type: nauc_ndcg_at_1_max value: 32.47174554115089 - type: nauc_ndcg_at_1_std value: 22.252970640869655 - type: nauc_ndcg_at_20_diff1 value: 30.22073304733139 - type: nauc_ndcg_at_20_max value: 36.85722580956459 - type: nauc_ndcg_at_20_std value: 28.82508960932221 - type: nauc_ndcg_at_3_diff1 value: 34.85087007597385 - type: nauc_ndcg_at_3_max value: 35.08880030166066 - type: nauc_ndcg_at_3_std value: 24.477164602350427 - type: nauc_ndcg_at_5_diff1 value: 32.15269255562139 - type: nauc_ndcg_at_5_max value: 36.26512978748847 - type: nauc_ndcg_at_5_std value: 26.121143638336193 - type: nauc_precision_at_1000_diff1 value: -5.016344866521763 - type: nauc_precision_at_1000_max value: 13.76155613533569 - type: nauc_precision_at_1000_std value: 42.87650310943072 - type: nauc_precision_at_100_diff1 value: -2.4765231121724867 - type: nauc_precision_at_100_max value: 26.413714147361173 - type: nauc_precision_at_100_std value: 52.07869389693284 - type: nauc_precision_at_10_diff1 value: 9.381859834804454 - type: nauc_precision_at_10_max value: 36.79686689654208 - type: nauc_precision_at_10_std value: 41.450385008923874 - type: nauc_precision_at_1_diff1 value: 43.14276503972391 - type: nauc_precision_at_1_max value: 33.23669937901841 - type: nauc_precision_at_1_std value: 23.574191783291614 - type: nauc_precision_at_20_diff1 value: 3.3554639781732143 - type: nauc_precision_at_20_max value: 35.07048369650734 - type: nauc_precision_at_20_std value: 46.90757933302204 - type: nauc_precision_at_3_diff1 value: 22.3364560733951 - type: nauc_precision_at_3_max value: 34.49198383469041 - type: nauc_precision_at_3_std value: 28.30886758592867 - type: nauc_precision_at_5_diff1 value: 14.242157915266043 - type: nauc_precision_at_5_max value: 36.78665790141447 - type: nauc_precision_at_5_std value: 34.22226904133568 - type: nauc_recall_at_1000_diff1 value: 6.177080203711223 - type: nauc_recall_at_1000_max value: 20.36718691855502 - type: nauc_recall_at_1000_std value: 21.44974953318914 - type: nauc_recall_at_100_diff1 value: 16.98521396327983 - type: nauc_recall_at_100_max value: 25.739641139625473 - type: nauc_recall_at_100_std value: 16.08045361596745 - type: nauc_recall_at_10_diff1 value: 28.066091446759465 - type: nauc_recall_at_10_max value: 15.875422037194987 - type: nauc_recall_at_10_std value: -2.7729209404094712 - type: nauc_recall_at_1_diff1 value: 54.952292547558436 - type: nauc_recall_at_1_max value: 13.172173380536137 - type: nauc_recall_at_1_std value: -11.135859432447047 - type: nauc_recall_at_20_diff1 value: 22.454203317605455 - type: nauc_recall_at_20_max value: 19.38991609441149 - type: nauc_recall_at_20_std value: 3.3669889925713683 - type: nauc_recall_at_3_diff1 value: 42.41050348142469 - type: nauc_recall_at_3_max value: 14.345477767632861 - type: nauc_recall_at_3_std value: 
-11.275161125178107 - type: nauc_recall_at_5_diff1 value: 34.851159133502286 - type: nauc_recall_at_5_max value: 15.03263812713638 - type: nauc_recall_at_5_std value: -9.042538295018138 - type: ndcg_at_1 value: 44.891999999999996 - type: ndcg_at_10 value: 36.723 - type: ndcg_at_100 value: 33.101 - type: ndcg_at_1000 value: 41.493 - type: ndcg_at_20 value: 34.14 - type: ndcg_at_3 value: 41.131 - type: ndcg_at_5 value: 39.446999999999996 - type: precision_at_1 value: 46.749 - type: precision_at_10 value: 27.616000000000003 - type: precision_at_100 value: 8.372 - type: precision_at_1000 value: 2.095 - type: precision_at_20 value: 20.294 - type: precision_at_3 value: 38.493 - type: precision_at_5 value: 34.427 - type: recall_at_1 value: 5.8069999999999995 - type: recall_at_10 value: 18.444 - type: recall_at_100 value: 33.655 - type: recall_at_1000 value: 63.839999999999996 - type: recall_at_20 value: 22.205 - type: recall_at_3 value: 10.61 - type: recall_at_5 value: 13.938999999999998 task: type: Retrieval - dataset: config: default name: MTEB NQ-PL revision: f171245712cf85dd4700b06bef18001578d0ca8d split: test type: clarin-knext/nq-pl metrics: - type: main_score value: 56.854000000000006 - type: map_at_1 value: 34.514 - type: map_at_10 value: 49.644 - type: map_at_100 value: 50.608 - type: map_at_1000 value: 50.635 - type: map_at_20 value: 50.305 - type: map_at_3 value: 45.672000000000004 - type: map_at_5 value: 48.089 - type: mrr_at_1 value: 38.78910776361529 - type: mrr_at_10 value: 52.148397984145234 - type: mrr_at_100 value: 52.852966946095215 - type: mrr_at_1000 value: 52.87105017860762 - type: mrr_at_20 value: 52.64188894631607 - type: mrr_at_3 value: 48.97643877945134 - type: mrr_at_5 value: 50.92168791039002 - type: nauc_map_at_1000_diff1 value: 37.02156712167867 - type: nauc_map_at_1000_max value: 30.9541229199217 - type: nauc_map_at_1000_std value: 7.320033004454671 - type: nauc_map_at_100_diff1 value: 37.02236703226826 - type: nauc_map_at_100_max value: 30.9697676745961 - type: nauc_map_at_100_std value: 7.33984133867723 - type: nauc_map_at_10_diff1 value: 36.90102700826612 - type: nauc_map_at_10_max value: 30.785723842405183 - type: nauc_map_at_10_std value: 6.779448226242215 - type: nauc_map_at_1_diff1 value: 39.909029450982274 - type: nauc_map_at_1_max value: 25.241631663639062 - type: nauc_map_at_1_std value: 3.9346798436914625 - type: nauc_map_at_20_diff1 value: 37.01885833177735 - type: nauc_map_at_20_max value: 30.93864719019393 - type: nauc_map_at_20_std value: 7.157784404582363 - type: nauc_map_at_3_diff1 value: 36.66395294442894 - type: nauc_map_at_3_max value: 28.73917625955397 - type: nauc_map_at_3_std value: 4.974442294121807 - type: nauc_map_at_5_diff1 value: 36.50200331851477 - type: nauc_map_at_5_max value: 30.19694653814823 - type: nauc_map_at_5_std value: 6.080701892676308 - type: nauc_mrr_at_1000_diff1 value: 37.13771503608112 - type: nauc_mrr_at_1000_max value: 31.751547147247507 - type: nauc_mrr_at_1000_std value: 9.508614158791604 - type: nauc_mrr_at_100_diff1 value: 37.13715249048103 - type: nauc_mrr_at_100_max value: 31.76453363846907 - type: nauc_mrr_at_100_std value: 9.527333431366577 - type: nauc_mrr_at_10_diff1 value: 37.04617391414406 - type: nauc_mrr_at_10_max value: 31.835558691659767 - type: nauc_mrr_at_10_std value: 9.403478249864207 - type: nauc_mrr_at_1_diff1 value: 40.24340603514061 - type: nauc_mrr_at_1_max value: 27.892025295592664 - type: nauc_mrr_at_1_std value: 6.948060152377137 - type: nauc_mrr_at_20_diff1 value: 37.13679664662962 - type: 
nauc_mrr_at_20_max value: 31.80571193908972 - type: nauc_mrr_at_20_std value: 9.463516427443066 - type: nauc_mrr_at_3_diff1 value: 36.59947958587673 - type: nauc_mrr_at_3_max value: 30.56905612034133 - type: nauc_mrr_at_3_std value: 8.213473085446296 - type: nauc_mrr_at_5_diff1 value: 36.66740305041658 - type: nauc_mrr_at_5_max value: 31.470226490982878 - type: nauc_mrr_at_5_std value: 9.02109643375307 - type: nauc_ndcg_at_1000_diff1 value: 36.60296185088649 - type: nauc_ndcg_at_1000_max value: 33.40562074993109 - type: nauc_ndcg_at_1000_std value: 10.60845451213325 - type: nauc_ndcg_at_100_diff1 value: 36.59946610918652 - type: nauc_ndcg_at_100_max value: 33.9570260243297 - type: nauc_ndcg_at_100_std value: 11.340469448481196 - type: nauc_ndcg_at_10_diff1 value: 36.14418247401987 - type: nauc_ndcg_at_10_max value: 33.451039871075345 - type: nauc_ndcg_at_10_std value: 9.272972801419813 - type: nauc_ndcg_at_1_diff1 value: 40.07169143996099 - type: nauc_ndcg_at_1_max value: 27.943354680588055 - type: nauc_ndcg_at_1_std value: 7.036639009967827 - type: nauc_ndcg_at_20_diff1 value: 36.51152244027151 - type: nauc_ndcg_at_20_max value: 33.89378482325653 - type: nauc_ndcg_at_20_std value: 10.342721315866635 - type: nauc_ndcg_at_3_diff1 value: 35.4822845318483 - type: nauc_ndcg_at_3_max value: 29.912345910181415 - type: nauc_ndcg_at_3_std value: 5.9694134283330715 - type: nauc_ndcg_at_5_diff1 value: 35.221776161219466 - type: nauc_ndcg_at_5_max value: 32.1072171248216 - type: nauc_ndcg_at_5_std value: 7.670174771541694 - type: nauc_precision_at_1000_diff1 value: -4.285000172509594 - type: nauc_precision_at_1000_max value: 14.600633321561062 - type: nauc_precision_at_1000_std value: 21.991435704986305 - type: nauc_precision_at_100_diff1 value: 1.7266493932509126 - type: nauc_precision_at_100_max value: 22.9932202096611 - type: nauc_precision_at_100_std value: 27.464183639561075 - type: nauc_precision_at_10_diff1 value: 16.16723142044687 - type: nauc_precision_at_10_max value: 32.61177863055963 - type: nauc_precision_at_10_std value: 19.30609156634069 - type: nauc_precision_at_1_diff1 value: 40.07169143996099 - type: nauc_precision_at_1_max value: 27.943354680588055 - type: nauc_precision_at_1_std value: 7.036639009967827 - type: nauc_precision_at_20_diff1 value: 10.986359452355082 - type: nauc_precision_at_20_max value: 30.001608294285408 - type: nauc_precision_at_20_std value: 23.470161266132752 - type: nauc_precision_at_3_diff1 value: 25.021299827765368 - type: nauc_precision_at_3_max value: 31.112435175145354 - type: nauc_precision_at_3_std value: 9.97933575854508 - type: nauc_precision_at_5_diff1 value: 19.85258852538675 - type: nauc_precision_at_5_max value: 33.017057636553346 - type: nauc_precision_at_5_std value: 14.226398540277224 - type: nauc_recall_at_1000_diff1 value: 32.956809555733294 - type: nauc_recall_at_1000_max value: 81.17616645437344 - type: nauc_recall_at_1000_std value: 80.81894015338722 - type: nauc_recall_at_100_diff1 value: 34.21543518933059 - type: nauc_recall_at_100_max value: 64.60424388566007 - type: nauc_recall_at_100_std value: 55.36262550526809 - type: nauc_recall_at_10_diff1 value: 31.854572843060865 - type: nauc_recall_at_10_max value: 41.47697651985406 - type: nauc_recall_at_10_std value: 15.449819317346778 - type: nauc_recall_at_1_diff1 value: 39.909029450982274 - type: nauc_recall_at_1_max value: 25.241631663639062 - type: nauc_recall_at_1_std value: 3.9346798436914625 - type: nauc_recall_at_20_diff1 value: 33.155424988870266 - type: nauc_recall_at_20_max value: 
47.41147314334969 - type: nauc_recall_at_20_std value: 24.122822585459915 - type: nauc_recall_at_3_diff1 value: 31.030069463711484 - type: nauc_recall_at_3_max value: 30.349471998175105 - type: nauc_recall_at_3_std value: 5.3792560913820635 - type: nauc_recall_at_5_diff1 value: 29.662449422215627 - type: nauc_recall_at_5_max value: 35.59583981361554 - type: nauc_recall_at_5_std value: 9.138475426366536 - type: ndcg_at_1 value: 38.847 - type: ndcg_at_10 value: 56.854000000000006 - type: ndcg_at_100 value: 60.767 - type: ndcg_at_1000 value: 61.399 - type: ndcg_at_20 value: 58.941 - type: ndcg_at_3 value: 49.576 - type: ndcg_at_5 value: 53.502 - type: precision_at_1 value: 38.847 - type: precision_at_10 value: 9.064 - type: precision_at_100 value: 1.127 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_20 value: 5.038 - type: precision_at_3 value: 22.335 - type: precision_at_5 value: 15.689 - type: recall_at_1 value: 34.514 - type: recall_at_10 value: 76.152 - type: recall_at_100 value: 92.837 - type: recall_at_1000 value: 97.596 - type: recall_at_20 value: 83.77799999999999 - type: recall_at_3 value: 57.484 - type: recall_at_5 value: 66.476 task: type: Retrieval - dataset: config: default name: MTEB PAC revision: None split: test type: laugustyniak/abusive-clauses-pl metrics: - type: accuracy value: 67.24297712134376 - type: accuracy_stderr value: 4.77558207347837 - type: ap value: 77.38171975466854 - type: ap_stderr value: 2.5801970175320394 - type: f1 value: 65.21823897814332 - type: f1_stderr value: 4.317111734308895 - type: main_score value: 67.24297712134376 task: type: Classification - dataset: config: default name: MTEB PSC revision: d05a294af9e1d3ff2bfb6b714e08a24a6cabc669 split: test type: PL-MTEB/psc-pairclassification metrics: - type: cosine_accuracy value: 97.95918367346938 - type: cosine_accuracy_threshold value: 59.87724328133361 - type: cosine_ap value: 99.24498625606927 - type: cosine_f1 value: 96.6867469879518 - type: cosine_f1_threshold value: 59.87724328133361 - type: cosine_precision value: 95.53571428571429 - type: cosine_recall value: 97.86585365853658 - type: dot_accuracy value: 98.51576994434137 - type: dot_accuracy_threshold value: 1574400.0 - type: dot_ap value: 99.28566232682996 - type: dot_f1 value: 97.57575757575758 - type: dot_f1_threshold value: 1564800.0 - type: dot_precision value: 96.98795180722891 - type: dot_recall value: 98.17073170731707 - type: euclidean_accuracy value: 97.6808905380334 - type: euclidean_accuracy_threshold value: 14418.957939643331 - type: euclidean_ap value: 99.0876340868033 - type: euclidean_f1 value: 96.24060150375941 - type: euclidean_f1_threshold value: 14442.183182634264 - type: euclidean_precision value: 94.95548961424333 - type: euclidean_recall value: 97.5609756097561 - type: main_score value: 99.28566232682996 - type: manhattan_accuracy value: 97.86641929499072 - type: manhattan_accuracy_threshold value: 681802.1857857704 - type: manhattan_ap value: 99.08465290287205 - type: manhattan_f1 value: 96.52042360060513 - type: manhattan_f1_threshold value: 681802.1857857704 - type: manhattan_precision value: 95.7957957957958 - type: manhattan_recall value: 97.2560975609756 - type: max_ap value: 99.28566232682996 - type: max_f1 value: 97.57575757575758 - type: max_precision value: 96.98795180722891 - type: max_recall value: 98.17073170731707 - type: similarity_accuracy value: 97.95918367346938 - type: similarity_accuracy_threshold value: 59.87724328133361 - type: similarity_ap value: 99.24498625606927 - type: 
similarity_f1 value: 96.6867469879518 - type: similarity_f1_threshold value: 59.87724328133361 - type: similarity_precision value: 95.53571428571429 - type: similarity_recall value: 97.86585365853658 task: type: PairClassification - dataset: config: default name: MTEB PolEmo2.0-IN revision: d90724373c70959f17d2331ad51fb60c71176b03 split: test type: PL-MTEB/polemo2_in metrics: - type: accuracy value: 90.41551246537396 - type: f1 value: 89.15361039614409 - type: f1_weighted value: 90.69893050097603 - type: main_score value: 90.41551246537396 task: type: Classification - dataset: config: default name: MTEB PolEmo2.0-OUT revision: 6a21ab8716e255ab1867265f8b396105e8aa63d4 split: test type: PL-MTEB/polemo2_out metrics: - type: accuracy value: 77.77327935222672 - type: f1 value: 61.238079022455636 - type: f1_weighted value: 80.58753601509183 - type: main_score value: 77.77327935222672 task: type: Classification - dataset: config: default name: MTEB PPC revision: None split: test type: PL-MTEB/ppc-pairclassification metrics: - type: cos_sim_accuracy value: 87.2 - type: cos_sim_accuracy_threshold value: 83.69773167092553 - type: cos_sim_ap value: 95.43345251568122 - type: cos_sim_f1 value: 89.82785602503913 - type: cos_sim_f1_threshold value: 81.2116503074739 - type: cos_sim_precision value: 85.16320474777447 - type: cos_sim_recall value: 95.03311258278146 - type: dot_accuracy value: 85.9 - type: dot_accuracy_threshold value: 2177600.0 - type: dot_ap value: 92.4192102018206 - type: dot_f1 value: 88.9238020424195 - type: dot_f1_threshold value: 2163200.0 - type: dot_precision value: 84.60388639760838 - type: dot_recall value: 93.70860927152319 - type: euclidean_accuracy value: 87.5 - type: euclidean_accuracy_threshold value: 9325.450203438862 - type: euclidean_ap value: 95.42730698295347 - type: euclidean_f1 value: 89.92747784045125 - type: euclidean_f1_threshold value: 9325.450203438862 - type: euclidean_precision value: 87.59811616954474 - type: euclidean_recall value: 92.3841059602649 - type: manhattan_accuracy value: 87.5 - type: manhattan_accuracy_threshold value: 441412.88244724274 - type: manhattan_ap value: 95.4277447451651 - type: manhattan_f1 value: 89.92747784045125 - type: manhattan_f1_threshold value: 441412.88244724274 - type: manhattan_precision value: 87.59811616954474 - type: manhattan_recall value: 92.3841059602649 - type: max_accuracy value: 87.5 - type: max_ap value: 95.43345251568122 - type: max_f1 value: 89.92747784045125 task: type: PairClassification - dataset: config: default name: MTEB Quora-PL revision: 0be27e93455051e531182b85e85e425aba12e9d4 split: test type: clarin-knext/quora-pl metrics: - type: main_score value: 84.47099999999999 - type: map_at_1 value: 65.892 - type: map_at_10 value: 80.11500000000001 - type: map_at_100 value: 80.861 - type: map_at_1000 value: 80.879 - type: map_at_20 value: 80.604 - type: map_at_3 value: 76.97 - type: map_at_5 value: 78.926 - type: mrr_at_1 value: 75.83 - type: mrr_at_10 value: 83.2125238095233 - type: mrr_at_100 value: 83.38714262504709 - type: mrr_at_1000 value: 83.38942088013238 - type: mrr_at_20 value: 83.34284466299037 - type: mrr_at_3 value: 81.95333333333281 - type: mrr_at_5 value: 82.78533333333272 - type: nauc_map_at_1000_diff1 value: 73.95721764018812 - type: nauc_map_at_1000_max value: 9.653675847999432 - type: nauc_map_at_1000_std value: -42.35408133902171 - type: nauc_map_at_100_diff1 value: 73.96621756991526 - type: nauc_map_at_100_max value: 9.618124708373092 - type: nauc_map_at_100_std value: -42.41429680546156 - 
type: nauc_map_at_10_diff1 value: 74.20643666348498 - type: nauc_map_at_10_max value: 9.056688996919677 - type: nauc_map_at_10_std value: -44.13396437616006 - type: nauc_map_at_1_diff1 value: 77.18196114257519 - type: nauc_map_at_1_max value: 7.840648640771136 - type: nauc_map_at_1_std value: -39.84395715001256 - type: nauc_map_at_20_diff1 value: 74.03475632514551 - type: nauc_map_at_20_max value: 9.385795565805118 - type: nauc_map_at_20_std value: -43.160299598965466 - type: nauc_map_at_3_diff1 value: 74.43855921599284 - type: nauc_map_at_3_max value: 7.574218825911361 - type: nauc_map_at_3_std value: -46.1476276122436 - type: nauc_map_at_5_diff1 value: 74.38688915461512 - type: nauc_map_at_5_max value: 8.557764506539128 - type: nauc_map_at_5_std value: -45.53897898458085 - type: nauc_mrr_at_1000_diff1 value: 74.0311045258841 - type: nauc_mrr_at_1000_max value: 11.885448379701055 - type: nauc_mrr_at_1000_std value: -38.16008409213179 - type: nauc_mrr_at_100_diff1 value: 74.03074603058893 - type: nauc_mrr_at_100_max value: 11.886356221882725 - type: nauc_mrr_at_100_std value: -38.159139191997795 - type: nauc_mrr_at_10_diff1 value: 73.99521522874129 - type: nauc_mrr_at_10_max value: 11.77749620520773 - type: nauc_mrr_at_10_std value: -38.266295250166635 - type: nauc_mrr_at_1_diff1 value: 75.53192564838908 - type: nauc_mrr_at_1_max value: 12.979267595721275 - type: nauc_mrr_at_1_std value: -36.634066084632785 - type: nauc_mrr_at_20_diff1 value: 74.01273934757484 - type: nauc_mrr_at_20_max value: 11.887566738728225 - type: nauc_mrr_at_20_std value: -38.169250252410485 - type: nauc_mrr_at_3_diff1 value: 73.6073534511043 - type: nauc_mrr_at_3_max value: 11.450856365709727 - type: nauc_mrr_at_3_std value: -38.767141663073964 - type: nauc_mrr_at_5_diff1 value: 73.84950218235583 - type: nauc_mrr_at_5_max value: 11.787394554048813 - type: nauc_mrr_at_5_std value: -38.57240589862417 - type: nauc_ndcg_at_1000_diff1 value: 73.51677487598074 - type: nauc_ndcg_at_1000_max value: 10.72929244202152 - type: nauc_ndcg_at_1000_std value: -39.92813917654933 - type: nauc_ndcg_at_100_diff1 value: 73.53904136553481 - type: nauc_ndcg_at_100_max value: 10.569310211635521 - type: nauc_ndcg_at_100_std value: -40.12206261908318 - type: nauc_ndcg_at_10_diff1 value: 73.55958917204208 - type: nauc_ndcg_at_10_max value: 9.255791947077263 - type: nauc_ndcg_at_10_std value: -42.7856138240991 - type: nauc_ndcg_at_1_diff1 value: 75.34289960079188 - type: nauc_ndcg_at_1_max value: 13.499789436258705 - type: nauc_ndcg_at_1_std value: -35.91483904818284 - type: nauc_ndcg_at_20_diff1 value: 73.48070745481307 - type: nauc_ndcg_at_20_max value: 9.92427572953505 - type: nauc_ndcg_at_20_std value: -41.55653404596579 - type: nauc_ndcg_at_3_diff1 value: 72.72072901275445 - type: nauc_ndcg_at_3_max value: 8.303708237302729 - type: nauc_ndcg_at_3_std value: -43.618531107389344 - type: nauc_ndcg_at_5_diff1 value: 73.30060059269601 - type: nauc_ndcg_at_5_max value: 8.915386932153249 - type: nauc_ndcg_at_5_std value: -44.088053429661 - type: nauc_precision_at_1000_diff1 value: -41.540517884119524 - type: nauc_precision_at_1000_max value: 6.9361565712971265 - type: nauc_precision_at_1000_std value: 42.39482890919027 - type: nauc_precision_at_100_diff1 value: -40.609576663184896 - type: nauc_precision_at_100_max value: 6.302451339507686 - type: nauc_precision_at_100_std value: 41.30693233869549 - type: nauc_precision_at_10_diff1 value: -30.91653155031006 - type: nauc_precision_at_10_max value: 4.84981614338782 - type: 
nauc_precision_at_10_std value: 24.47022404030676 - type: nauc_precision_at_1_diff1 value: 75.34289960079188 - type: nauc_precision_at_1_max value: 13.499789436258705 - type: nauc_precision_at_1_std value: -35.91483904818284 - type: nauc_precision_at_20_diff1 value: -36.75164419452007 - type: nauc_precision_at_20_max value: 5.440757182282365 - type: nauc_precision_at_20_std value: 33.08928025809355 - type: nauc_precision_at_3_diff1 value: -5.3240699725635565 - type: nauc_precision_at_3_max value: 5.156636102003736 - type: nauc_precision_at_3_std value: -0.9779263105110453 - type: nauc_precision_at_5_diff1 value: -19.92133198420086 - type: nauc_precision_at_5_max value: 5.432766335564369 - type: nauc_precision_at_5_std value: 11.417736295996392 - type: nauc_recall_at_1000_diff1 value: 56.57663068186203 - type: nauc_recall_at_1000_max value: 25.80329039728696 - type: nauc_recall_at_1000_std value: 57.82937604195464 - type: nauc_recall_at_100_diff1 value: 67.25188672746224 - type: nauc_recall_at_100_max value: 6.879939694351325 - type: nauc_recall_at_100_std value: -30.098258041087096 - type: nauc_recall_at_10_diff1 value: 68.00694154421653 - type: nauc_recall_at_10_max value: 0.7226814903576098 - type: nauc_recall_at_10_std value: -52.980002751088215 - type: nauc_recall_at_1_diff1 value: 77.18196114257519 - type: nauc_recall_at_1_max value: 7.840648640771136 - type: nauc_recall_at_1_std value: -39.84395715001256 - type: nauc_recall_at_20_diff1 value: 66.56016564739411 - type: nauc_recall_at_20_max value: 1.919044428493598 - type: nauc_recall_at_20_std value: -49.5380686276396 - type: nauc_recall_at_3_diff1 value: 69.83247207081557 - type: nauc_recall_at_3_max value: 2.395588418833963 - type: nauc_recall_at_3_std value: -52.11119790224493 - type: nauc_recall_at_5_diff1 value: 69.25881483845956 - type: nauc_recall_at_5_max value: 2.9185552604991716 - type: nauc_recall_at_5_std value: -54.376346690212095 - type: ndcg_at_1 value: 75.92 - type: ndcg_at_10 value: 84.47099999999999 - type: ndcg_at_100 value: 86.11999999999999 - type: ndcg_at_1000 value: 86.276 - type: ndcg_at_20 value: 85.37599999999999 - type: ndcg_at_3 value: 81.0 - type: ndcg_at_5 value: 82.88799999999999 - type: precision_at_1 value: 75.92 - type: precision_at_10 value: 12.987000000000002 - type: precision_at_100 value: 1.5190000000000001 - type: precision_at_1000 value: 0.156 - type: precision_at_20 value: 6.977 - type: precision_at_3 value: 35.573 - type: precision_at_5 value: 23.566000000000003 - type: recall_at_1 value: 65.892 - type: recall_at_10 value: 93.318 - type: recall_at_100 value: 99.124 - type: recall_at_1000 value: 99.92699999999999 - type: recall_at_20 value: 96.256 - type: recall_at_3 value: 83.69 - type: recall_at_5 value: 88.783 task: type: Retrieval - dataset: config: default name: MTEB SCIDOCS-PL revision: 45452b03f05560207ef19149545f168e596c9337 split: test type: clarin-knext/scidocs-pl metrics: - type: main_score value: 19.528000000000002 - type: map_at_1 value: 4.5280000000000005 - type: map_at_10 value: 11.649 - type: map_at_100 value: 14.019 - type: map_at_1000 value: 14.35 - type: map_at_20 value: 12.866 - type: map_at_3 value: 8.35 - type: map_at_5 value: 9.84 - type: mrr_at_1 value: 22.3 - type: mrr_at_10 value: 32.690039682539656 - type: mrr_at_100 value: 33.91097016542133 - type: mrr_at_1000 value: 33.96940693754695 - type: mrr_at_20 value: 33.418312740750785 - type: mrr_at_3 value: 29.4 - type: mrr_at_5 value: 31.21999999999997 - type: nauc_map_at_1000_diff1 value: 20.52578935318615 - type: 
nauc_map_at_1000_max value: 28.28553814852898 - type: nauc_map_at_1000_std value: 18.74384140790138 - type: nauc_map_at_100_diff1 value: 20.508083204903077 - type: nauc_map_at_100_max value: 28.281447260273346 - type: nauc_map_at_100_std value: 18.51851601604162 - type: nauc_map_at_10_diff1 value: 21.028884157759624 - type: nauc_map_at_10_max value: 26.98935951161403 - type: nauc_map_at_10_std value: 14.434790357547536 - type: nauc_map_at_1_diff1 value: 23.406427416653127 - type: nauc_map_at_1_max value: 21.759624726647303 - type: nauc_map_at_1_std value: 8.335925909478444 - type: nauc_map_at_20_diff1 value: 20.370301978337785 - type: nauc_map_at_20_max value: 27.30787972231405 - type: nauc_map_at_20_std value: 16.166505401287353 - type: nauc_map_at_3_diff1 value: 23.920717676009453 - type: nauc_map_at_3_max value: 26.061264285994124 - type: nauc_map_at_3_std value: 10.707123907182902 - type: nauc_map_at_5_diff1 value: 22.180679453453557 - type: nauc_map_at_5_max value: 26.85332935641574 - type: nauc_map_at_5_std value: 12.316377808191762 - type: nauc_mrr_at_1000_diff1 value: 21.49186339320302 - type: nauc_mrr_at_1000_max value: 24.329921012356493 - type: nauc_mrr_at_1000_std value: 13.6080824939291 - type: nauc_mrr_at_100_diff1 value: 21.47653180378912 - type: nauc_mrr_at_100_max value: 24.34218235410752 - type: nauc_mrr_at_100_std value: 13.646711743513668 - type: nauc_mrr_at_10_diff1 value: 21.487198850706935 - type: nauc_mrr_at_10_max value: 24.32385099521571 - type: nauc_mrr_at_10_std value: 13.26596223383694 - type: nauc_mrr_at_1_diff1 value: 23.19221955587559 - type: nauc_mrr_at_1_max value: 21.963004569187575 - type: nauc_mrr_at_1_std value: 8.799819519408619 - type: nauc_mrr_at_20_diff1 value: 21.51014357510076 - type: nauc_mrr_at_20_max value: 24.376067405199347 - type: nauc_mrr_at_20_std value: 13.643597889716563 - type: nauc_mrr_at_3_diff1 value: 22.60437837853161 - type: nauc_mrr_at_3_max value: 23.58608363876532 - type: nauc_mrr_at_3_std value: 11.887163540535768 - type: nauc_mrr_at_5_diff1 value: 21.919324914716633 - type: nauc_mrr_at_5_max value: 23.71458680225389 - type: nauc_mrr_at_5_std value: 12.507643886191785 - type: nauc_ndcg_at_1000_diff1 value: 18.546848864440005 - type: nauc_ndcg_at_1000_max value: 30.031984469206325 - type: nauc_ndcg_at_1000_std value: 26.561149084437485 - type: nauc_ndcg_at_100_diff1 value: 18.76271748622068 - type: nauc_ndcg_at_100_max value: 30.180887663861306 - type: nauc_ndcg_at_100_std value: 25.50551358758007 - type: nauc_ndcg_at_10_diff1 value: 19.861367738304697 - type: nauc_ndcg_at_10_max value: 27.360442235691522 - type: nauc_ndcg_at_10_std value: 16.476546243351976 - type: nauc_ndcg_at_1_diff1 value: 23.56715803292495 - type: nauc_ndcg_at_1_max value: 22.29229945166374 - type: nauc_ndcg_at_1_std value: 8.43434671818737 - type: nauc_ndcg_at_20_diff1 value: 18.885059883708053 - type: nauc_ndcg_at_20_max value: 27.78854464221595 - type: nauc_ndcg_at_20_std value: 19.404353378015255 - type: nauc_ndcg_at_3_diff1 value: 23.34227259398943 - type: nauc_ndcg_at_3_max value: 25.75899010582446 - type: nauc_ndcg_at_3_std value: 12.097012181915954 - type: nauc_ndcg_at_5_diff1 value: 21.599246331396863 - type: nauc_ndcg_at_5_max value: 26.6575824351444 - type: nauc_ndcg_at_5_std value: 14.029006846982394 - type: nauc_precision_at_1000_diff1 value: 4.880571159099271 - type: nauc_precision_at_1000_max value: 24.693741787360725 - type: nauc_precision_at_1000_std value: 41.00756555344345 - type: nauc_precision_at_100_diff1 value: 10.440170876298648 - 
type: nauc_precision_at_100_max value: 28.942738351320408 - type: nauc_precision_at_100_std value: 36.921704945977446 - type: nauc_precision_at_10_diff1 value: 15.55680558043308 - type: nauc_precision_at_10_max value: 27.31414489241847 - type: nauc_precision_at_10_std value: 19.76275914256793 - type: nauc_precision_at_1_diff1 value: 23.56715803292495 - type: nauc_precision_at_1_max value: 22.29229945166374 - type: nauc_precision_at_1_std value: 8.43434671818737 - type: nauc_precision_at_20_diff1 value: 12.57247210423589 - type: nauc_precision_at_20_max value: 25.978951783180946 - type: nauc_precision_at_20_std value: 23.89998191646426 - type: nauc_precision_at_3_diff1 value: 22.61273732758558 - type: nauc_precision_at_3_max value: 26.51246898792034 - type: nauc_precision_at_3_std value: 13.618855663226162 - type: nauc_precision_at_5_diff1 value: 19.216237125486472 - type: nauc_precision_at_5_max value: 27.491221626577868 - type: nauc_precision_at_5_std value: 16.448119031617793 - type: nauc_recall_at_1000_diff1 value: 5.787043341957982 - type: nauc_recall_at_1000_max value: 25.922109246772763 - type: nauc_recall_at_1000_std value: 43.03768522656805 - type: nauc_recall_at_100_diff1 value: 10.696362559629796 - type: nauc_recall_at_100_max value: 29.335080453227146 - type: nauc_recall_at_100_std value: 37.271217586452124 - type: nauc_recall_at_10_diff1 value: 15.458092305569215 - type: nauc_recall_at_10_max value: 27.24445210740807 - type: nauc_recall_at_10_std value: 19.71157635644842 - type: nauc_recall_at_1_diff1 value: 23.406427416653127 - type: nauc_recall_at_1_max value: 21.759624726647303 - type: nauc_recall_at_1_std value: 8.335925909478444 - type: nauc_recall_at_20_diff1 value: 12.666354755313089 - type: nauc_recall_at_20_max value: 26.089770792562327 - type: nauc_recall_at_20_std value: 24.153776619741254 - type: nauc_recall_at_3_diff1 value: 22.545408113368953 - type: nauc_recall_at_3_max value: 26.18564049945919 - type: nauc_recall_at_3_std value: 13.308772571657293 - type: nauc_recall_at_5_diff1 value: 19.063078320434958 - type: nauc_recall_at_5_max value: 27.15038597116091 - type: nauc_recall_at_5_std value: 16.202694888143302 - type: ndcg_at_1 value: 22.2 - type: ndcg_at_10 value: 19.528000000000002 - type: ndcg_at_100 value: 28.444000000000003 - type: ndcg_at_1000 value: 33.826 - type: ndcg_at_20 value: 22.746 - type: ndcg_at_3 value: 18.413 - type: ndcg_at_5 value: 15.927 - type: precision_at_1 value: 22.2 - type: precision_at_10 value: 10.24 - type: precision_at_100 value: 2.3040000000000003 - type: precision_at_1000 value: 0.358 - type: precision_at_20 value: 6.97 - type: precision_at_3 value: 17.299999999999997 - type: precision_at_5 value: 13.919999999999998 - type: recall_at_1 value: 4.5280000000000005 - type: recall_at_10 value: 20.757 - type: recall_at_100 value: 46.75 - type: recall_at_1000 value: 72.738 - type: recall_at_20 value: 28.28 - type: recall_at_3 value: 10.558 - type: recall_at_5 value: 14.148 task: type: Retrieval - dataset: config: default name: MTEB SICK-E-PL revision: 71bba34b0ece6c56dfcf46d9758a27f7a90f17e9 split: test type: PL-MTEB/sicke-pl-pairclassification metrics: - type: cosine_accuracy value: 87.50509580105992 - type: cosine_accuracy_threshold value: 89.01510631979949 - type: cosine_ap value: 85.58291779193907 - type: cosine_f1 value: 77.58919293384136 - type: cosine_f1_threshold value: 87.10908804245841 - type: cosine_precision value: 75.52258934592044 - type: cosine_recall value: 79.77207977207978 - type: dot_accuracy value: 83.9380350591113 - 
type: dot_accuracy_threshold value: 2292800.0 - type: dot_ap value: 77.56937485120034 - type: dot_f1 value: 73.32065906210391 - type: dot_f1_threshold value: 2190400.0 - type: dot_precision value: 66.03881278538812 - type: dot_recall value: 82.4074074074074 - type: euclidean_accuracy value: 87.89237668161435 - type: euclidean_accuracy_threshold value: 7497.701400069587 - type: euclidean_ap value: 85.97216152106346 - type: euclidean_f1 value: 77.97228300510578 - type: euclidean_f1_threshold value: 7799.027816670506 - type: euclidean_precision value: 79.89536621823618 - type: euclidean_recall value: 76.13960113960114 - type: main_score value: 85.97216152106346 - type: manhattan_accuracy value: 87.85161027313494 - type: manhattan_accuracy_threshold value: 357242.9743885994 - type: manhattan_ap value: 85.96709490495458 - type: manhattan_f1 value: 77.9874213836478 - type: manhattan_f1_threshold value: 383558.8531732559 - type: manhattan_precision value: 76.5432098765432 - type: manhattan_recall value: 79.48717948717949 - type: max_ap value: 85.97216152106346 - type: max_f1 value: 77.9874213836478 - type: max_precision value: 79.89536621823618 - type: max_recall value: 82.4074074074074 - type: similarity_accuracy value: 87.50509580105992 - type: similarity_accuracy_threshold value: 89.01510631979949 - type: similarity_ap value: 85.58291779193907 - type: similarity_f1 value: 77.58919293384136 - type: similarity_f1_threshold value: 87.10908804245841 - type: similarity_precision value: 75.52258934592044 - type: similarity_recall value: 79.77207977207978 task: type: PairClassification - dataset: config: default name: MTEB SICK-R-PL revision: fd5c2441b7eeff8676768036142af4cfa42c1339 split: test type: PL-MTEB/sickr-pl-sts metrics: - type: cosine_pearson value: 79.68602301743276 - type: cosine_spearman value: 78.15913085997471 - type: euclidean_pearson value: 77.19541180768627 - type: euclidean_spearman value: 77.9122894221527 - type: main_score value: 78.15913085997471 - type: manhattan_pearson value: 77.24713453824641 - type: manhattan_spearman value: 77.95971728547582 - type: pearson value: 79.68602301743276 - type: spearman value: 78.15913085997471 task: type: STS - dataset: config: pl name: MTEB STS22 (pl) revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 split: test type: mteb/sts22-crosslingual-sts metrics: - type: cosine_pearson value: 42.01062393061261 - type: cosine_spearman value: 42.79076406559122 - type: euclidean_pearson value: 28.57786522106708 - type: euclidean_spearman value: 42.51040813516686 - type: main_score value: 42.79076406559122 - type: manhattan_pearson value: 28.855884350706653 - type: manhattan_spearman value: 42.77481125184737 - type: pearson value: 42.01062393061261 - type: spearman value: 42.79076406559122 task: type: STS - dataset: config: default name: MTEB SciFact-PL revision: 47932a35f045ef8ed01ba82bf9ff67f6e109207e split: test type: clarin-knext/scifact-pl metrics: - type: main_score value: 74.434 - type: map_at_1 value: 59.494 - type: map_at_10 value: 69.893 - type: map_at_100 value: 70.45 - type: map_at_1000 value: 70.466 - type: map_at_20 value: 70.259 - type: map_at_3 value: 67.037 - type: map_at_5 value: 68.777 - type: mrr_at_1 value: 62.66666666666667 - type: mrr_at_10 value: 71.04457671957671 - type: mrr_at_100 value: 71.52299909263925 - type: mrr_at_1000 value: 71.53881086964122 - type: mrr_at_20 value: 71.33636271136271 - type: mrr_at_3 value: 69.16666666666667 - type: mrr_at_5 value: 70.26666666666667 - type: nauc_map_at_1000_diff1 value: 68.97113084189034 
- type: nauc_map_at_1000_max value: 51.00665747497857 - type: nauc_map_at_1000_std value: 8.970270487093412 - type: nauc_map_at_100_diff1 value: 68.97281660521169 - type: nauc_map_at_100_max value: 51.01659549614879 - type: nauc_map_at_100_std value: 8.986483862053491 - type: nauc_map_at_10_diff1 value: 69.07605123979184 - type: nauc_map_at_10_max value: 51.229841935772804 - type: nauc_map_at_10_std value: 9.050901052243548 - type: nauc_map_at_1_diff1 value: 71.46187295357046 - type: nauc_map_at_1_max value: 46.82038076857106 - type: nauc_map_at_1_std value: 6.931602615510153 - type: nauc_map_at_20_diff1 value: 68.93823362705625 - type: nauc_map_at_20_max value: 51.15218544845727 - type: nauc_map_at_20_std value: 8.993550237629675 - type: nauc_map_at_3_diff1 value: 69.19558420072627 - type: nauc_map_at_3_max value: 47.345905341053886 - type: nauc_map_at_3_std value: 4.833936436252541 - type: nauc_map_at_5_diff1 value: 69.05067049349557 - type: nauc_map_at_5_max value: 49.62866209452668 - type: nauc_map_at_5_std value: 7.455937282103214 - type: nauc_mrr_at_1000_diff1 value: 69.2896395759106 - type: nauc_mrr_at_1000_max value: 54.20478659857226 - type: nauc_mrr_at_1000_std value: 12.534151525016302 - type: nauc_mrr_at_100_diff1 value: 69.29115865311857 - type: nauc_mrr_at_100_max value: 54.212882919608475 - type: nauc_mrr_at_100_std value: 12.548435473868432 - type: nauc_mrr_at_10_diff1 value: 69.29596234146305 - type: nauc_mrr_at_10_max value: 54.391683731646935 - type: nauc_mrr_at_10_std value: 12.74312540729047 - type: nauc_mrr_at_1_diff1 value: 71.19661136604304 - type: nauc_mrr_at_1_max value: 53.50646788895577 - type: nauc_mrr_at_1_std value: 14.68408048005645 - type: nauc_mrr_at_20_diff1 value: 69.24714813412893 - type: nauc_mrr_at_20_max value: 54.32239828421196 - type: nauc_mrr_at_20_std value: 12.623980761665866 - type: nauc_mrr_at_3_diff1 value: 69.22708724496187 - type: nauc_mrr_at_3_max value: 53.18873450995116 - type: nauc_mrr_at_3_std value: 11.336687945925586 - type: nauc_mrr_at_5_diff1 value: 69.10748983236182 - type: nauc_mrr_at_5_max value: 53.878090193979034 - type: nauc_mrr_at_5_std value: 12.079036178698662 - type: nauc_ndcg_at_1000_diff1 value: 68.66705448374432 - type: nauc_ndcg_at_1000_max value: 52.74699991296371 - type: nauc_ndcg_at_1000_std value: 10.535824386304968 - type: nauc_ndcg_at_100_diff1 value: 68.66862462407086 - type: nauc_ndcg_at_100_max value: 52.979821543362874 - type: nauc_ndcg_at_100_std value: 10.856284103500371 - type: nauc_ndcg_at_10_diff1 value: 68.66965948376267 - type: nauc_ndcg_at_10_max value: 53.978681919984474 - type: nauc_ndcg_at_10_std value: 11.10472732803466 - type: nauc_ndcg_at_1_diff1 value: 71.19661136604304 - type: nauc_ndcg_at_1_max value: 53.50646788895577 - type: nauc_ndcg_at_1_std value: 14.68408048005645 - type: nauc_ndcg_at_20_diff1 value: 68.20754850499976 - type: nauc_ndcg_at_20_max value: 53.590485842045595 - type: nauc_ndcg_at_20_std value: 10.719753086433334 - type: nauc_ndcg_at_3_diff1 value: 68.23406959629385 - type: nauc_ndcg_at_3_max value: 48.8837450762613 - type: nauc_ndcg_at_3_std value: 6.287949648205997 - type: nauc_ndcg_at_5_diff1 value: 68.52532849588677 - type: nauc_ndcg_at_5_max value: 51.29845300513165 - type: nauc_ndcg_at_5_std value: 8.15488455762137 - type: nauc_precision_at_1000_diff1 value: -29.56388929021074 - type: nauc_precision_at_1000_max value: 18.61674681637121 - type: nauc_precision_at_1000_std value: 41.68541412973936 - type: nauc_precision_at_100_diff1 value: -17.020740767390375 - type: 
nauc_precision_at_100_max value: 24.321682766394957 - type: nauc_precision_at_100_std value: 39.36188711602 - type: nauc_precision_at_10_diff1 value: 7.735819461600302 - type: nauc_precision_at_10_max value: 39.59963139423176 - type: nauc_precision_at_10_std value: 33.923494696390385 - type: nauc_precision_at_1_diff1 value: 71.19661136604304 - type: nauc_precision_at_1_max value: 53.50646788895577 - type: nauc_precision_at_1_std value: 14.68408048005645 - type: nauc_precision_at_20_diff1 value: -3.587900694179661 - type: nauc_precision_at_20_max value: 33.36606615861144 - type: nauc_precision_at_20_std value: 34.51624192343654 - type: nauc_precision_at_3_diff1 value: 41.996620318298625 - type: nauc_precision_at_3_max value: 43.08007454860597 - type: nauc_precision_at_3_std value: 14.398965447916495 - type: nauc_precision_at_5_diff1 value: 25.054180107661132 - type: nauc_precision_at_5_max value: 40.94617942853718 - type: nauc_precision_at_5_std value: 23.69992709404865 - type: nauc_recall_at_1000_diff1 value: .nan - type: nauc_recall_at_1000_max value: .nan - type: nauc_recall_at_1000_std value: .nan - type: nauc_recall_at_100_diff1 value: 68.09523809523836 - type: nauc_recall_at_100_max value: 63.034547152194406 - type: nauc_recall_at_100_std value: 23.594771241830657 - type: nauc_recall_at_10_diff1 value: 66.43213426149696 - type: nauc_recall_at_10_max value: 63.07509853849101 - type: nauc_recall_at_10_std value: 15.44924084252273 - type: nauc_recall_at_1_diff1 value: 71.46187295357046 - type: nauc_recall_at_1_max value: 46.82038076857106 - type: nauc_recall_at_1_std value: 6.931602615510153 - type: nauc_recall_at_20_diff1 value: 61.64354198229226 - type: nauc_recall_at_20_max value: 63.09950698826864 - type: nauc_recall_at_20_std value: 12.823209698925014 - type: nauc_recall_at_3_diff1 value: 65.63352507252078 - type: nauc_recall_at_3_max value: 45.10210171735505 - type: nauc_recall_at_3_std value: -0.08017546941514365 - type: nauc_recall_at_5_diff1 value: 65.93453179242769 - type: nauc_recall_at_5_max value: 51.97740656606473 - type: nauc_recall_at_5_std value: 4.929967882548962 - type: ndcg_at_1 value: 62.666999999999994 - type: ndcg_at_10 value: 74.434 - type: ndcg_at_100 value: 76.655 - type: ndcg_at_1000 value: 77.08 - type: ndcg_at_20 value: 75.588 - type: ndcg_at_3 value: 69.75099999999999 - type: ndcg_at_5 value: 72.09100000000001 - type: precision_at_1 value: 62.666999999999994 - type: precision_at_10 value: 9.9 - type: precision_at_100 value: 1.097 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_20 value: 5.2 - type: precision_at_3 value: 27.0 - type: precision_at_5 value: 17.933 - type: recall_at_1 value: 59.494 - type: recall_at_10 value: 87.13300000000001 - type: recall_at_100 value: 96.667 - type: recall_at_1000 value: 100.0 - type: recall_at_20 value: 91.43299999999999 - type: recall_at_3 value: 74.461 - type: recall_at_5 value: 80.34400000000001 task: type: Retrieval - dataset: config: default name: MTEB TRECCOVID-PL revision: 81bcb408f33366c2a20ac54adafad1ae7e877fdd split: test type: clarin-knext/trec-covid-pl metrics: - type: main_score value: 82.749 - type: map_at_1 value: 0.20400000000000001 - type: map_at_10 value: 2.099 - type: map_at_100 value: 12.948 - type: map_at_1000 value: 32.007000000000005 - type: map_at_20 value: 3.746 - type: map_at_3 value: 0.651 - type: map_at_5 value: 1.061 - type: mrr_at_1 value: 84.0 - type: mrr_at_10 value: 91.66666666666666 - type: mrr_at_100 value: 91.66666666666666 - type: mrr_at_1000 value: 
91.66666666666666 - type: mrr_at_20 value: 91.66666666666666 - type: mrr_at_3 value: 91.66666666666666 - type: mrr_at_5 value: 91.66666666666666 - type: nauc_map_at_1000_diff1 value: 1.0291414165448085 - type: nauc_map_at_1000_max value: 57.33479540784058 - type: nauc_map_at_1000_std value: 76.70364036170582 - type: nauc_map_at_100_diff1 value: 6.949672309533349 - type: nauc_map_at_100_max value: 43.99861611069154 - type: nauc_map_at_100_std value: 64.12473626966596 - type: nauc_map_at_10_diff1 value: 4.208568177173666 - type: nauc_map_at_10_max value: 18.875910045226423 - type: nauc_map_at_10_std value: 34.58171216714189 - type: nauc_map_at_1_diff1 value: 8.433450768728983 - type: nauc_map_at_1_max value: 24.08001091473891 - type: nauc_map_at_1_std value: 35.21473053133869 - type: nauc_map_at_20_diff1 value: 6.041054220619057 - type: nauc_map_at_20_max value: 22.57475437061051 - type: nauc_map_at_20_std value: 35.254808865756964 - type: nauc_map_at_3_diff1 value: 11.166815378728485 - type: nauc_map_at_3_max value: 18.995433996118248 - type: nauc_map_at_3_std value: 34.29696290521795 - type: nauc_map_at_5_diff1 value: 7.1134812647567855 - type: nauc_map_at_5_max value: 20.03877039266845 - type: nauc_map_at_5_std value: 36.21644151312843 - type: nauc_mrr_at_1000_diff1 value: -7.262394669801826 - type: nauc_mrr_at_1000_max value: 66.22378992749366 - type: nauc_mrr_at_1000_std value: 68.18146188516563 - type: nauc_mrr_at_100_diff1 value: -7.262394669801826 - type: nauc_mrr_at_100_max value: 66.22378992749366 - type: nauc_mrr_at_100_std value: 68.18146188516563 - type: nauc_mrr_at_10_diff1 value: -7.262394669801826 - type: nauc_mrr_at_10_max value: 66.22378992749366 - type: nauc_mrr_at_10_std value: 68.18146188516563 - type: nauc_mrr_at_1_diff1 value: -11.38929798723619 - type: nauc_mrr_at_1_max value: 68.58738340697101 - type: nauc_mrr_at_1_std value: 68.00441826215022 - type: nauc_mrr_at_20_diff1 value: -7.262394669801826 - type: nauc_mrr_at_20_max value: 66.22378992749366 - type: nauc_mrr_at_20_std value: 68.18146188516563 - type: nauc_mrr_at_3_diff1 value: -7.262394669801826 - type: nauc_mrr_at_3_max value: 66.22378992749366 - type: nauc_mrr_at_3_std value: 68.18146188516563 - type: nauc_mrr_at_5_diff1 value: -7.262394669801826 - type: nauc_mrr_at_5_max value: 66.22378992749366 - type: nauc_mrr_at_5_std value: 68.18146188516563 - type: nauc_ndcg_at_1000_diff1 value: 2.5628376286433334 - type: nauc_ndcg_at_1000_max value: 57.605148480655025 - type: nauc_ndcg_at_1000_std value: 76.62891677430625 - type: nauc_ndcg_at_100_diff1 value: -13.313083767893671 - type: nauc_ndcg_at_100_max value: 52.932453336031905 - type: nauc_ndcg_at_100_std value: 73.5050466104544 - type: nauc_ndcg_at_10_diff1 value: -6.837803344621873 - type: nauc_ndcg_at_10_max value: 59.29833159945462 - type: nauc_ndcg_at_10_std value: 63.719268128346705 - type: nauc_ndcg_at_1_diff1 value: 4.834338452523335 - type: nauc_ndcg_at_1_max value: 53.58546768562144 - type: nauc_ndcg_at_1_std value: 59.07659252386643 - type: nauc_ndcg_at_20_diff1 value: -9.617683189610558 - type: nauc_ndcg_at_20_max value: 54.57354685878183 - type: nauc_ndcg_at_20_std value: 63.15198506529425 - type: nauc_ndcg_at_3_diff1 value: 15.216236580270994 - type: nauc_ndcg_at_3_max value: 58.345749967766416 - type: nauc_ndcg_at_3_std value: 61.78177922399883 - type: nauc_ndcg_at_5_diff1 value: 1.3882436296634026 - type: nauc_ndcg_at_5_max value: 62.44013008368074 - type: nauc_ndcg_at_5_std value: 65.64455986653293 - type: nauc_precision_at_1000_diff1 value: 
-18.516822124710856 - type: nauc_precision_at_1000_max value: 33.10336267989325 - type: nauc_precision_at_1000_std value: 29.49816019882571 - type: nauc_precision_at_100_diff1 value: -14.113619184538592 - type: nauc_precision_at_100_max value: 55.55228172103563 - type: nauc_precision_at_100_std value: 69.64355056246397 - type: nauc_precision_at_10_diff1 value: -27.271286464111455 - type: nauc_precision_at_10_max value: 61.885272647604594 - type: nauc_precision_at_10_std value: 60.73389705676694 - type: nauc_precision_at_1_diff1 value: -11.38929798723619 - type: nauc_precision_at_1_max value: 68.58738340697101 - type: nauc_precision_at_1_std value: 68.00441826215022 - type: nauc_precision_at_20_diff1 value: -21.53639909310826 - type: nauc_precision_at_20_max value: 53.361537614358376 - type: nauc_precision_at_20_std value: 55.58737187496432 - type: nauc_precision_at_3_diff1 value: 3.785071466384217 - type: nauc_precision_at_3_max value: 61.66906148377818 - type: nauc_precision_at_3_std value: 62.81857369734561 - type: nauc_precision_at_5_diff1 value: -16.00339477131436 - type: nauc_precision_at_5_max value: 61.5246951163262 - type: nauc_precision_at_5_std value: 63.615062452722135 - type: nauc_recall_at_1000_diff1 value: 5.871263115826736 - type: nauc_recall_at_1000_max value: 50.48397949000848 - type: nauc_recall_at_1000_std value: 67.37950715297474 - type: nauc_recall_at_100_diff1 value: 8.310215006893952 - type: nauc_recall_at_100_max value: 28.687726825722386 - type: nauc_recall_at_100_std value: 50.34038560928654 - type: nauc_recall_at_10_diff1 value: 3.3408195168322075 - type: nauc_recall_at_10_max value: 6.89511828305496 - type: nauc_recall_at_10_std value: 22.929267555360028 - type: nauc_recall_at_1_diff1 value: 8.433450768728983 - type: nauc_recall_at_1_max value: 24.08001091473891 - type: nauc_recall_at_1_std value: 35.21473053133869 - type: nauc_recall_at_20_diff1 value: 5.307683260432045 - type: nauc_recall_at_20_max value: 10.025532087519974 - type: nauc_recall_at_20_std value: 24.110512570368947 - type: nauc_recall_at_3_diff1 value: 13.355136074654078 - type: nauc_recall_at_3_max value: 8.568079109800236 - type: nauc_recall_at_3_std value: 23.691593767005745 - type: nauc_recall_at_5_diff1 value: 6.535580157651383 - type: nauc_recall_at_5_max value: 9.1442468749571 - type: nauc_recall_at_5_std value: 27.00111567203191 - type: ndcg_at_1 value: 79.0 - type: ndcg_at_10 value: 82.749 - type: ndcg_at_100 value: 63.846000000000004 - type: ndcg_at_1000 value: 57.691 - type: ndcg_at_20 value: 77.076 - type: ndcg_at_3 value: 84.83800000000001 - type: ndcg_at_5 value: 83.016 - type: precision_at_1 value: 84.0 - type: precision_at_10 value: 87.8 - type: precision_at_100 value: 66.10000000000001 - type: precision_at_1000 value: 25.764 - type: precision_at_20 value: 81.10000000000001 - type: precision_at_3 value: 91.333 - type: precision_at_5 value: 88.8 - type: recall_at_1 value: 0.20400000000000001 - type: recall_at_10 value: 2.294 - type: recall_at_100 value: 16.134999999999998 - type: recall_at_1000 value: 54.981 - type: recall_at_20 value: 4.201 - type: recall_at_3 value: 0.699 - type: recall_at_5 value: 1.141 task: type: Retrieval --- <h1 align="center">FlagEmbedding</h1> For more details please refer to our Github: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding). **BGE-Multilingual-Gemma2** is a LLM-based multilingual embedding model. It is trained on a diverse range of languages and tasks based on [google/gemma-2-9b](https://huggingface.co/google/gemma-2-9b). 
BGE-Multilingual-Gemma2 primarily demonstrates the following advancements:
- Diverse training data: The model's training data spans a broad range of languages, including English, Chinese, Japanese, Korean, French, and more. Additionally, the data covers a variety of task types, such as retrieval, classification, and clustering.
- Outstanding performance: The model exhibits state-of-the-art (SOTA) results on multilingual benchmarks like MIRACL, MTEB-pl, and MTEB-fr. It also achieves excellent performance on other major evaluations, including MTEB, C-MTEB, and AIR-Bench.

## 📑 Open-source Plan

- [x] Checkpoint
- [ ] Training Data

We will release the training data of **BGE-Multilingual-Gemma2** in the future.

## Usage

### Using FlagEmbedding

```shell
git clone https://github.com/FlagOpen/FlagEmbedding.git
cd FlagEmbedding
pip install -e .
```

```python
from FlagEmbedding import FlagLLMModel

queries = ["how much protein should a female eat", "summit define"]
documents = [
    "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
    "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
]
model = FlagLLMModel('BAAI/bge-multilingual-gemma2',
                     query_instruction_for_retrieval="Given a web search query, retrieve relevant passages that answer the query.",
                     use_fp16=True)  # Setting use_fp16 to True speeds up computation with a slight performance degradation
embeddings_1 = model.encode_queries(queries)
embeddings_2 = model.encode_corpus(documents)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)
# [[ 0.559     0.01654 ]
#  [-0.002575  0.4998  ]]
```

By default, FlagLLMModel will use all available GPUs when encoding. Please set `os.environ["CUDA_VISIBLE_DEVICES"]` to select specific GPUs.
You can also set `os.environ["CUDA_VISIBLE_DEVICES"]=""` to make all GPUs unavailable.

### Using Sentence Transformers

```python
from sentence_transformers import SentenceTransformer
import torch

# Load the model, optionally in float16 precision for faster inference
model = SentenceTransformer("BAAI/bge-multilingual-gemma2", model_kwargs={"torch_dtype": torch.float16})

# Prepare a prompt given an instruction
instruction = 'Given a web search query, retrieve relevant passages that answer the query.'
prompt = f'<instruct>{instruction}\n<query>'

# Prepare queries and documents
queries = [
    'how much protein should a female eat',
    'summit define',
]
documents = [
    "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
    "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
]
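# Note: only the queries are encoded with the instruction prompt below;
# documents are embedded as-is, matching the asymmetric retrieval setup
# (the HuggingFace Transformers example below makes the same distinction).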

# Compute the query and document embeddings
query_embeddings = model.encode(queries, prompt=prompt)
document_embeddings = model.encode(documents)

# Compute the cosine similarity between the query and document embeddings
similarities = model.similarity(query_embeddings, document_embeddings)
print(similarities)
# tensor([[ 0.5591,  0.0164],
#         [-0.0026,  0.4993]], dtype=torch.float16)
```

### Using HuggingFace Transformers

```python
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel


def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    # With left padding, every sequence's last token sits in the final position,
    # so the final hidden state can be taken directly. Otherwise, use the
    # attention mask to locate each sequence's last non-padding token.
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    else:
        sequence_lengths = attention_mask.sum(dim=1) - 1
        batch_size = last_hidden_states.shape[0]
        return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]


def get_detailed_instruct(task_description: str, query: str) -> str:
    # Queries are prefixed with the task instruction; documents are not.
    return f'<instruct>{task_description}\n<query>{query}'


task = 'Given a web search query, retrieve relevant passages that answer the query.'
queries = [
    get_detailed_instruct(task, 'how much protein should a female eat'),
    get_detailed_instruct(task, 'summit define')
]
# No need to add instructions for documents
documents = [
    "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.",
    "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments."
]
input_texts = queries + documents

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-multilingual-gemma2')
model = AutoModel.from_pretrained('BAAI/bge-multilingual-gemma2')
model.eval()

max_length = 4096
# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=max_length, padding=True, truncation=True, return_tensors='pt', pad_to_multiple_of=8)

with torch.no_grad():
    outputs = model(**batch_dict)
    embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])

# normalize embeddings
embeddings = F.normalize(embeddings, p=2, dim=1)
scores = (embeddings[:2] @ embeddings[2:].T) * 100
print(scores.tolist())
# [[55.92064666748047, 1.6549524068832397], [-0.2698777914047241, 49.95653533935547]]
```

## Evaluation

`bge-multilingual-gemma2` exhibits **state-of-the-art (SOTA) results on benchmarks like MIRACL, MTEB-pl, and MTEB-fr**. It also achieves excellent performance on other major evaluations, including MTEB, C-MTEB, and AIR-Bench.
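To sanity-check numbers like the ones reported below, the snippet that follows is a minimal sketch of running a single MTEB task locally with the open-source [`mteb`](https://github.com/embeddings-benchmark/mteb) package. The task choice and output folder here are illustrative assumptions, and the instruction/prompt configuration used for the official scores may differ, so treat this as a starting point rather than the official evaluation script.

```python
# A minimal sketch (assumed setup, not the official evaluation script):
# run one MTEB task with the `mteb` package and a SentenceTransformer wrapper.
import mteb
import torch
from sentence_transformers import SentenceTransformer

# float16 keeps memory usage manageable for a Gemma2-9B-based encoder
model = SentenceTransformer(
    "BAAI/bge-multilingual-gemma2",
    model_kwargs={"torch_dtype": torch.float16},
)

# "SICK-R-PL" is one of the Polish STS tasks reported in the metadata above
tasks = mteb.get_tasks(tasks=["SICK-R-PL"])
evaluation = mteb.MTEB(tasks=tasks)

# Scores are written as JSON files under the (illustrative) output folder
results = evaluation.run(model, output_folder="results/bge-multilingual-gemma2")
```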
- [**MIRACL**](https://github.com/project-miracl/miracl)

nDCG@10:

<img src="./imgs/MIRACL_ndcg@10.png" alt="MIRACL-nDCG@10" style="zoom:200%;" />

Recall@100:

<img src="./imgs/MIRACL_recall@100.png" alt="MIRACL-Recall@100" style="zoom:200%;" />

- [**MTEB-fr/pl**](https://huggingface.co/spaces/mteb/leaderboard)

<img src="./imgs/MTEB_FR_PL.png" alt="MTEB-fr/pl" style="zoom:200%;" />

- [**MTEB**](https://huggingface.co/spaces/mteb/leaderboard)

<img src="./imgs/MTEB.png" alt="MTEB" style="zoom:200%;" />

- [**BEIR**](https://huggingface.co/spaces/mteb/leaderboard)

<img src="./imgs/BEIR.png" alt="BEIR" style="zoom:200%;" />

- [**C-MTEB**](https://huggingface.co/spaces/mteb/leaderboard)

<img src="./imgs/C-MTEB.png" alt="C-MTEB" style="zoom:200%;" />

- [**AIR-Bench**](https://huggingface.co/spaces/AIR-Bench/leaderboard)

Long-Doc (en, Recall@10):

<img src="./imgs/AIR-Bench_Long-Doc_en.png" alt="AIR-Bench_Long-Doc" style="zoom:200%;" />

QA (en&zh, nDCG@10):

<img src="./imgs/AIR-Bench_QA_en_zh.png" alt="AIR-Bench_QA" style="zoom:200%;" />

## Model List

`bge` is short for `BAAI general embedding`.

| Model | Language | Links | Description | query instruction for retrieval [1] |
| :---- | :------: | :---: | :---------: | :----------------------------------: |
| [BAAI/bge-multilingual-gemma2](https://huggingface.co/BAAI/bge-multilingual-gemma2) | Multilingual | - | An LLM-based multilingual embedding model, trained on a diverse range of languages and tasks. | |
| [BAAI/bge-en-icl](https://huggingface.co/BAAI/bge-en-icl) | English | - | An LLM-based dense retriever with in-context learning capabilities that can fully leverage the model's potential based on few-shot examples (4096 tokens) | Provide instructions and few-shot examples freely based on the given task. |
| [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | [Inference](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3#usage) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3) | Multi-Functionality (dense retrieval, sparse retrieval, multi-vector (ColBERT)), Multi-Linguality, and Multi-Granularity (8192 tokens) | |
| [BAAI/llm-embedder](https://huggingface.co/BAAI/llm-embedder) | English | [Inference](./FlagEmbedding/llm_embedder/README.md) [Fine-tune](./FlagEmbedding/llm_embedder/README.md) | a unified embedding model to support diverse retrieval augmentation needs for LLMs | See [README](./FlagEmbedding/llm_embedder/README.md) |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` |

## Citation

If you find this repository useful, please consider giving a star :star: and a citation

```
@misc{bge-m3,
  title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
  author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
  year={2024},
  eprint={2402.03216},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
@misc{bge_embedding,
  title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
  author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff},
  year={2023},
  eprint={2309.07597},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```
shahrukhx01/question-vs-statement-classifier
shahrukhx01
"2023-03-29T22:01:12Z"
84,518
40
transformers
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "neural-search-query-classification", "neural-search", "en", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
---
language: "en"
tags:
- neural-search-query-classification
- neural-search
widget:
- text: "what did you eat in lunch?"
---

# KEYWORD STATEMENT VS QUESTION CLASSIFIER FOR NEURAL SEARCH

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("shahrukhx01/question-vs-statement-classifier")
model = AutoModelForSequenceClassification.from_pretrained("shahrukhx01/question-vs-statement-classifier")
```

Trained to classify queries as either question queries or statement queries, adding query-type classification support to [Haystack](https://github.com/deepset-ai/haystack/issues/611).
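A minimal inference sketch building on the snippet above. The label mapping is not documented in this card, so the question/statement assignment below is an assumption; confirm it via `model.config.id2label`:

```python
import torch

query = "what did you eat in lunch?"
inputs = tokenizer(query, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
# Assumption: index 1 = question-style query, index 0 = keyword/statement query;
# check model.config.id2label for the actual mapping of this checkpoint.
print(pred, model.config.id2label[pred])
```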
mistralai/Mistral-Small-Instruct-2409
mistralai
"2024-10-11T12:11:58Z"
84,500
302
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "en", "fr", "de", "es", "it", "pt", "zh", "ja", "ru", "ko", "license:other", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-09-17T13:05:37Z"
---
language:
- en
- fr
- de
- es
- it
- pt
- zh
- ja
- ru
- ko
license: other
license_name: mrl
inference: false
license_link: https://mistral.ai/licenses/MRL-0.1.md
extra_gated_description: >-
  If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
library_name: transformers
---

# Model Card for Mistral-Small-Instruct-2409

Mistral-Small-Instruct-2409 is an instruct fine-tuned model with the following characteristics:
- 22B parameters
- Vocabulary size of 32768
- Supports function calling
- 32k sequence length

## Usage Examples

### vLLM (recommended)

We recommend using this model with the [vLLM library](https://github.com/vllm-project/vllm) to implement production-ready inference pipelines.

**_Installation_**

Make sure you install `vLLM >= v0.6.1.post1`:
```
pip install --upgrade vllm
```

Also make sure you have `mistral_common >= 1.4.1` installed:
```
pip install --upgrade mistral_common
```

You can also make use of a ready-to-go [docker image](https://hub.docker.com/layers/vllm/vllm-openai/latest/images/sha256-de9032a92ffea7b5c007dad80b38fd44aac11eddc31c435f8e52f3b7404bbf39?context=explore).

**_Offline_**

```py
from vllm import LLM
from vllm.sampling_params import SamplingParams

model_name = "mistralai/Mistral-Small-Instruct-2409"
sampling_params = SamplingParams(max_tokens=8192)

# note that running Mistral-Small on a single GPU requires at least 44 GB of GPU RAM
# If you want to divide the GPU requirement over multiple devices, please add *e.g.* `tensor_parallel_size=2`
llm = LLM(model=model_name, tokenizer_mode="mistral", config_format="mistral", load_format="mistral")

prompt = "How often does the letter r occur in Mistral?"

messages = [
    {
        "role": "user",
        "content": prompt
    },
]

outputs = llm.chat(messages, sampling_params=sampling_params)

print(outputs[0].outputs[0].text)
```

**_Server_**

You can also use Mistral Small in a server/client setting.

1. Spin up a server:

```
vllm serve mistralai/Mistral-Small-Instruct-2409 --tokenizer_mode mistral --config_format mistral --load_format mistral
```

**Note:** Running Mistral-Small on a single GPU requires at least 44 GB of GPU RAM.

If you want to divide the GPU requirement over multiple devices, please add *e.g.* `--tensor-parallel-size 2`

2. And ping the client:

```
curl --location 'http://<your-node-url>:8000/v1/chat/completions' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer token' \
--data '{
    "model": "mistralai/Mistral-Small-Instruct-2409",
    "messages": [
      {
        "role": "user",
        "content": "How often does the letter r occur in Mistral?"
      }
    ]
}'
```

### Mistral-inference

We recommend using [mistral-inference](https://github.com/mistralai/mistral-inference) to quickly try out / "vibe-check" the model.

**_Install_**

Make sure to have `mistral_inference >= 1.4.1` installed.

```
pip install mistral_inference --upgrade
```

**_Download_**

```py
from huggingface_hub import snapshot_download
from pathlib import Path

mistral_models_path = Path.home().joinpath('mistral_models', '22B-Instruct-Small')
mistral_models_path.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="mistralai/Mistral-Small-Instruct-2409", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
```

### Chat

After installing `mistral_inference`, a `mistral-chat` CLI command should be available in your environment.
You can chat with the model using ``` mistral-chat $HOME/mistral_models/22B-Instruct-Small --instruct --max_tokens 256 ``` ### Instruct following ```py from mistral_inference.transformer import Transformer from mistral_inference.generate import generate from mistral_common.tokens.tokenizers.mistral import MistralTokenizer from mistral_common.protocol.instruct.messages import UserMessage from mistral_common.protocol.instruct.request import ChatCompletionRequest tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tokenizer.model.v3") model = Transformer.from_folder(mistral_models_path) completion_request = ChatCompletionRequest(messages=[UserMessage(content="How often does the letter r occur in Mistral?")]) tokens = tokenizer.encode_chat_completion(completion_request).tokens out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id) result = tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0]) print(result) ``` ### Function calling ```py from mistral_common.protocol.instruct.tool_calls import Function, Tool from mistral_inference.transformer import Transformer from mistral_inference.generate import generate from mistral_common.tokens.tokenizers.mistral import MistralTokenizer from mistral_common.protocol.instruct.messages import UserMessage from mistral_common.protocol.instruct.request import ChatCompletionRequest tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tokenizer.model.v3") model = Transformer.from_folder(mistral_models_path) completion_request = ChatCompletionRequest( tools=[ Tool( function=Function( name="get_current_weather", description="Get the current weather", parameters={ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location.", }, }, "required": ["location", "format"], }, ) ) ], messages=[ UserMessage(content="What's the weather like today in Paris?"), ], ) tokens = tokenizer.encode_chat_completion(completion_request).tokens out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id) result = tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0]) print(result) ``` ### Usage in Hugging Face Transformers You can also use Hugging Face `transformers` library to run inference using various chat templates, or fine-tune the model. Example for inference: ```python from transformers import LlamaTokenizerFast, MistralForCausalLM import torch device = "cuda" tokenizer = LlamaTokenizerFast.from_pretrained('mistralai/Mistral-Small-Instruct-2409') tokenizer.pad_token = tokenizer.eos_token model = MistralForCausalLM.from_pretrained('mistralai/Mistral-Small-Instruct-2409', torch_dtype=torch.bfloat16) model = model.to(device) prompt = "How often does the letter r occur in Mistral?" messages = [ {"role": "user", "content": prompt}, ] model_input = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(device) gen = model.generate(model_input, max_new_tokens=150) dec = tokenizer.batch_decode(gen) print(dec) ``` And you should obtain ```text <s> [INST] How often does the letter r occur in Mistral? [/INST] To determine how often the letter "r" occurs in the word "Mistral," we can simply count the instances of "r" in the word. 
The word "Mistral" is broken down as follows: - M - i - s - t - r - a - l Counting the "r"s, we find that there is only one "r" in "Mistral." Therefore, the letter "r" occurs once in the word "Mistral." </s> ``` ## The Mistral AI Team Albert Jiang, Alexandre Sablayrolles, Alexis Tacnet, Alok Kothari, Antoine Roux, Arthur Mensch, Audrey Herblin-Stoop, Augustin Garreau, Austin Birky, Bam4d, Baptiste Bout, Baudouin de Monicault, Blanche Savary, Carole Rambaud, Caroline Feldman, Devendra Singh Chaplot, Diego de las Casas, Diogo Costa, Eleonore Arcelin, Emma Bou Hanna, Etienne Metzger, Gaspard Blanchet, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Harizo Rajaona, Henri Roussez, Hichem Sattouf, Ian Mack, Jean-Malo Delignon, Jessica Chudnovsky, Justus Murke, Kartik Khandelwal, Lawrence Stewart, Louis Martin, Louis Ternon, Lucile Saulnier, Lélio Renard Lavaud, Margaret Jennings, Marie Pellat, Marie Torelli, Marie-Anne Lachaux, Marjorie Janiewicz, Mickaël Seznec, Nicolas Schuhl, Niklas Muhs, Olivier de Garrigues, Patrick von Platen, Paul Jacob, Pauline Buche, Pavan Kumar Reddy, Perry Savas, Pierre Stock, Romain Sauvestre, Sagar Vaze, Sandeep Subramanian, Saurabh Garg, Sophia Yang, Szymon Antoniak, Teven Le Scao, Thibault Schueller, Thibaut Lavril, Thomas Wang, Théophile Gervet, Timothée Lacroix, Valera Nemychnikova, Wendy Shang, William El Sayed, William Marshall
princeton-nlp/sup-simcse-bert-base-uncased
princeton-nlp
"2021-05-20T02:54:31Z"
84,471
20
transformers
[ "transformers", "pytorch", "jax", "bert", "feature-extraction", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
feature-extraction
"2022-03-02T23:29:05Z"
Entry not found
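No model card was provided for this entry. As a hedged sketch rather than official documentation, the checkpoint can be used for feature extraction in the usual SimCSE style; using `pooler_output` as the sentence embedding is an assumption carried over from the SimCSE reference implementation:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased")
model = AutoModel.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased")

texts = ["A man is playing a guitar.", "Someone is performing music."]
inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    # Assumption: the pooler output is the intended sentence embedding,
    # as in the SimCSE reference implementation.
    embeddings = model(**inputs).pooler_output

similarity = torch.nn.functional.cosine_similarity(embeddings[0:1], embeddings[1:2])
print(similarity.item())
```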
Qwen/Qwen2-72B-Instruct
Qwen
"2024-10-08T05:16:14Z"
84,414
663
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "chat", "conversational", "en", "arxiv:2309.00071", "base_model:Qwen/Qwen2-72B", "base_model:finetune:Qwen/Qwen2-72B", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-05-28T03:48:49Z"
---
license: other
license_name: tongyi-qianwen
license_link: https://huggingface.co/Qwen/Qwen2-72B-Instruct/blob/main/LICENSE
language:
- en
pipeline_tag: text-generation
base_model: Qwen/Qwen2-72B
tags:
- chat
new_version: Qwen/Qwen2.5-72B-Instruct
---

# Qwen2-72B-Instruct

## Introduction

Qwen2 is the new series of Qwen large language models. For Qwen2, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters, including a Mixture-of-Experts model. This repo contains the instruction-tuned 72B Qwen2 model.

Compared with state-of-the-art open-source language models, including the previously released Qwen1.5, Qwen2 has generally surpassed most open-source models and demonstrated competitiveness against proprietary models across a series of benchmarks targeting language understanding, language generation, multilingual capability, coding, mathematics, reasoning, etc.

Qwen2-72B-Instruct supports a context length of up to 131,072 tokens, enabling the processing of extensive inputs. Please refer to [this section](#processing-long-texts) for detailed instructions on how to deploy Qwen2 for handling long texts.

For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2/), [GitHub](https://github.com/QwenLM/Qwen2), and [Documentation](https://qwen.readthedocs.io/en/latest/).
<br>

## Model Details

Qwen2 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes.

## Training details

We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization.

## Requirements

The code for Qwen2 has been merged into the latest Hugging Face `transformers`, and we advise you to install `transformers>=4.37.0`, or you might encounter the following error:
```
KeyError: 'qwen2'
```

## Quickstart

Below is a code snippet showing how to use `apply_chat_template` to load the tokenizer and model and generate content.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
device = "cuda" # the device to load the model onto

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-72B-Instruct",
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-72B-Instruct")

prompt = "Give me a short introduction to large language model."
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(device)

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=512
)
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```

### Processing Long Texts

To handle extensive inputs exceeding 32,768 tokens, we utilize [YARN](https://arxiv.org/abs/2309.00071), a technique for enhancing model length extrapolation, ensuring optimal performance on lengthy texts.

For deployment, we recommend using vLLM.
You can enable the long-context capabilities by following these steps:

1. **Install vLLM**: You can install vLLM by running the following command.

```bash
pip install "vllm>=0.4.3"
```

Or you can install vLLM from [source](https://github.com/vllm-project/vllm/).

2. **Configure Model Settings**: After downloading the model weights, modify the `config.json` file by including the below snippet:
```json
{
    "architectures": [
        "Qwen2ForCausalLM"
    ],
    // ...
    "vocab_size": 152064,

    // adding the following snippets
    "rope_scaling": {
        "factor": 4.0,
        "original_max_position_embeddings": 32768,
        "type": "yarn"
    }
}
```
This snippet enables YARN to support longer contexts (the factor of 4.0 over the original 32,768-token window yields the 131,072-token context length).

3. **Model Deployment**: Utilize vLLM to deploy your model. For instance, you can set up an OpenAI-compatible server using the command:

```bash
python -m vllm.entrypoints.openai.api_server --served-model-name Qwen2-72B-Instruct --model path/to/weights
```

Then you can access the Chat API by:

```bash
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2-72B-Instruct",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Your Long Input Here."}
        ]
        }'
```

For further usage instructions of vLLM, please refer to our [GitHub](https://github.com/QwenLM/Qwen2).

**Note**: Presently, vLLM only supports static YARN, which means the scaling factor remains constant regardless of input length, **potentially impacting performance on shorter texts**. We advise adding the `rope_scaling` configuration only when processing long contexts is required.

## Evaluation

We briefly compare Qwen2-72B-Instruct with similar-sized instruction-tuned LLMs, including our previous Qwen1.5-72B-Chat. The results are shown as follows:

| Datasets | Llama-3-70B-Instruct | Qwen1.5-72B-Chat | **Qwen2-72B-Instruct** |
| :--- | :---: | :---: | :---: |
| _**English**_ | | | |
| MMLU | 82.0 | 75.6 | **82.3** |
| MMLU-Pro | 56.2 | 51.7 | **64.4** |
| GPQA | 41.9 | 39.4 | **42.4** |
| TheoremQA | 42.5 | 28.8 | **44.4** |
| MT-Bench | 8.95 | 8.61 | **9.12** |
| Arena-Hard | 41.1 | 36.1 | **48.1** |
| IFEval (Prompt Strict-Acc.) | 77.3 | 55.8 | **77.6** |
| _**Coding**_ | | | |
| HumanEval | 81.7 | 71.3 | **86.0** |
| MBPP | **82.3** | 71.9 | 80.2 |
| MultiPL-E | 63.4 | 48.1 | **69.2** |
| EvalPlus | 75.2 | 66.9 | **79.0** |
| LiveCodeBench | 29.3 | 17.9 | **35.7** |
| _**Mathematics**_ | | | |
| GSM8K | **93.0** | 82.7 | 91.1 |
| MATH | 50.4 | 42.5 | **59.7** |
| _**Chinese**_ | | | |
| C-Eval | 61.6 | 76.1 | **83.8** |
| AlignBench | 7.42 | 7.28 | **8.27** |

## Citation

If you find our work helpful, feel free to give us a cite.

```
@article{qwen2,
  title={Qwen2 Technical Report},
  year={2024}
}
```
jonatasgrosman/wav2vec2-large-xlsr-53-hungarian
jonatasgrosman
"2022-12-14T01:57:43Z"
84,403
6
transformers
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "hu", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2022-03-02T23:29:05Z"
---
language: hu
datasets:
- common_voice
metrics:
- wer
- cer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Hungarian by Jonatas Grosman
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice hu
      type: common_voice
      args: hu
    metrics:
    - name: Test WER
      type: wer
      value: 31.40
    - name: Test CER
      type: cer
      value: 6.20
---

# Fine-tuned XLSR-53 large model for speech recognition in Hungarian

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Hungarian using the train and validation splits of [Common Voice 6.1](https://huggingface.co/datasets/common_voice) and [CSS10](https://github.com/Kyubyong/css10). When using this model, make sure that your speech input is sampled at 16kHz.

This model has been fine-tuned thanks to the GPU credits generously given by the [OVHcloud](https://www.ovhcloud.com/en/public-cloud/ai-training/) :)

The script used for training can be found here: https://github.com/jonatasgrosman/wav2vec2-sprint

## Usage

The model can be used directly (without a language model) as follows...

Using the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) library:

```python
from huggingsound import SpeechRecognitionModel

model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-large-xlsr-53-hungarian")
audio_paths = ["/path/to/file.mp3", "/path/to/another_file.wav"]

transcriptions = model.transcribe(audio_paths)
```

Writing your own inference script:

```python
import torch
import librosa
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

LANG_ID = "hu"
MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-hungarian"
SAMPLES = 5

test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]")

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
    batch["speech"] = speech_array
    batch["sentence"] = batch["sentence"].upper()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
predicted_sentences = processor.batch_decode(predicted_ids)

for i, predicted_sentence in enumerate(predicted_sentences):
    print("-" * 100)
    print("Reference:", test_dataset[i]["sentence"])
    print("Prediction:", predicted_sentence)
```

| Reference | Prediction |
| ------------- | ------------- |
| BÜSZKÉK VAGYUNK A MAGYAR EMBEREK NAGYSZERŰ SZELLEMI ALKOTÁSAIRA. | BÜSZKÉK VAGYUNK A MAGYAR EMBEREK NAGYSZERŰ SZELLEMI ALKOTÁSAIRE |
| A NEMZETSÉG TAGJAI KÖZÜL EZT TERMESZTIK A LEGSZÉLESEBB KÖRBEN ÍZLETES TERMÉSÉÉRT. | A NEMZETSÉG TAGJAI KÖZÜL ESZSZERMESZTIK A LEGSZELESEBB KÖRBEN IZLETES TERMÉSSÉÉRT |
| A VÁROSBA VÁGYÓDOTT A LEGJOBBAN, ÉPPEN MERT ODA NEM JUTHATOTT EL SOHA. | A VÁROSBA VÁGYÓDOTT A LEGJOBBAN ÉPPEN MERT ODA NEM JUTHATOTT EL SOHA |
| SÍRJA MÁRA MEGSEMMISÜLT. | SIMGI A MANDO MEG SEMMICSEN |
| MINDEN ZENESZÁMOT DRÁGAKŐNEK NEVEZETT. | MINDEN ZENA SZÁMODRAGAKŐNEK NEVEZETT |
| ÍGY MÚLT EL A DÉLELŐTT. | ÍGY MÚLT EL A DÍN ELŐTT |
| REMEK POFA! | A REMEG PUFO |
| SZEMET SZEMÉRT, FOGAT FOGÉRT. | SZEMET SZEMÉRT FOGADD FOGÉRT |
| BIZTOSAN LAKIK ITT NÉHÁNY ATYÁMFIA. | BIZTOSAN LAKIKÉT NÉHANY ATYAMFIA |
| A SOROK KÖZÖTT OLVAS. | A SOROG KÖZÖTT OLVAS |

## Evaluation

The model can be evaluated as follows on the Hungarian test data of Common Voice.

```python
import torch
import re
import warnings
import librosa
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

LANG_ID = "hu"
MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-hungarian"
DEVICE = "cuda"

CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞", "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]", "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。", "、", "﹂", "﹁", "‧", "~", "﹏", ",", "{", "}", "(", ")", "[", "]", "【", "】", "‥", "〽", "『", "』", "〝", "〟", "⟨", "⟩", "〜", ":", "!", "?", "♪", "؛", "/", "\\", "º", "−", "^", "ʻ", "ˆ"]

test_dataset = load_dataset("common_voice", LANG_ID, split="test")

wer = load_metric("wer.py") # https://github.com/jonatasgrosman/wav2vec2-sprint/blob/main/wer.py
cer = load_metric("cer.py") # https://github.com/jonatasgrosman/wav2vec2-sprint/blob/main/cer.py

chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
model.to(DEVICE)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
    batch["speech"] = speech_array
    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run batched inference and collect the predicted transcriptions
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

predictions = [x.upper() for x in result["pred_strings"]]
references = [x.upper() for x in result["sentence"]]

print(f"WER: {wer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}")
print(f"CER: {cer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}")
```

**Test Result**:

In the table below I report the Word Error Rate (WER) and the Character Error Rate (CER) of the model. I ran the evaluation script described above on other models as well (on 2021-04-22). Note that the table below may show different results from those already reported; differences may be caused by specificities of the other evaluation scripts used.
| Model | WER | CER | | ------------- | ------------- | ------------- | | jonatasgrosman/wav2vec2-large-xlsr-53-hungarian | **31.40%** | **6.20%** | | anton-l/wav2vec2-large-xlsr-53-hungarian | 42.39% | 9.39% | | gchhablani/wav2vec2-large-xlsr-hu | 46.42% | 10.04% | | birgermoell/wav2vec2-large-xlsr-hungarian | 46.93% | 10.31% | ## Citation If you want to cite this model you can use this: ```bibtex @misc{grosman2021xlsr53-large-hungarian, title={Fine-tuned {XLSR}-53 large model for speech recognition in {H}ungarian}, author={Grosman, Jonatas}, howpublished={\url{https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-hungarian}}, year={2021} } ```
facebook/dragon-plus-context-encoder
facebook
"2023-09-27T17:26:22Z"
84,393
38
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "feature-extraction", "arxiv:2302.07452", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
feature-extraction
"2023-02-15T18:19:38Z"
---
tags:
- feature-extraction
pipeline_tag: feature-extraction
---

DRAGON+ is a BERT-base sized dense retriever initialized from [RetroMAE](https://huggingface.co/Shitao/RetroMAE) and further trained on the data augmented from the MS MARCO corpus, following the approach described in [How to Train Your DRAGON: Diverse Augmentation Towards Generalizable Dense Retrieval](https://arxiv.org/abs/2302.07452).

<p align="center">
<img src="https://raw.githubusercontent.com/facebookresearch/dpr-scale/main/dragon/images/teaser.png" width="600">
</p>

The associated GitHub repository is available at https://github.com/facebookresearch/dpr-scale/tree/main/dragon. We use an asymmetric dual encoder, with two distinctly parameterized encoders. The following models are also available:

Model | Initialization | MARCO Dev | BEIR | Query Encoder Path | Context Encoder Path
|---|---|---|---|---|---
DRAGON+ | Shitao/RetroMAE | 39.0 | 47.4 | [facebook/dragon-plus-query-encoder](https://huggingface.co/facebook/dragon-plus-query-encoder) | [facebook/dragon-plus-context-encoder](https://huggingface.co/facebook/dragon-plus-context-encoder)
DRAGON-RoBERTa | RoBERTa-base | 39.4 | 47.2 | [facebook/dragon-roberta-query-encoder](https://huggingface.co/facebook/dragon-roberta-query-encoder) | [facebook/dragon-roberta-context-encoder](https://huggingface.co/facebook/dragon-roberta-context-encoder)

## Usage (HuggingFace Transformers)

Using the model directly available in HuggingFace transformers.

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained('facebook/dragon-plus-query-encoder')
query_encoder = AutoModel.from_pretrained('facebook/dragon-plus-query-encoder')
context_encoder = AutoModel.from_pretrained('facebook/dragon-plus-context-encoder')

# We use msmarco query and passages as an example
query = "Where was Marie Curie born?"
contexts = [
    "Maria Sklodowska, later known as Marie Curie, was born on November 7, 1867.",
    "Born in Paris on 15 May 1859, Pierre Curie was the son of Eugène Curie, a doctor of French Catholic origin from Alsace."
]

# Apply tokenizer
query_input = tokenizer(query, return_tensors='pt')
ctx_input = tokenizer(contexts, padding=True, truncation=True, return_tensors='pt')

# Compute embeddings: take the last-layer hidden state of the [CLS] token
query_emb = query_encoder(**query_input).last_hidden_state[:, 0, :]
ctx_emb = context_encoder(**ctx_input).last_hidden_state[:, 0, :]

# Compute similarity scores using dot product
score1 = query_emb @ ctx_emb[0]  # 396.5625
score2 = query_emb @ ctx_emb[1]  # 393.8340
```
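As a small extension of the snippet above (not part of the original card), all passages can be scored against the query in one matrix product and ranked by score:

```python
# Score every context against the query at once and rank passages by score.
scores = query_emb @ ctx_emb.T               # shape: (1, num_contexts)
ranking = torch.argsort(scores, dim=1, descending=True)
print(ranking[0].tolist())  # expected [0, 1]: the Marie Curie passage ranks first
```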
microsoft/Florence-2-base-ft
microsoft
"2024-07-20T00:12:30Z"
84,368
92
transformers
[ "transformers", "pytorch", "florence2", "text-generation", "vision", "image-text-to-text", "custom_code", "arxiv:2311.06242", "license:mit", "autotrain_compatible", "region:us" ]
image-text-to-text
"2024-06-15T00:58:07Z"
---
license: mit
license_link: https://huggingface.co/microsoft/Florence-2-base-ft/resolve/main/LICENSE
pipeline_tag: image-text-to-text
tags:
- vision
---

# Florence-2: Advancing a Unified Representation for a Variety of Vision Tasks

## Model Summary

This Hub repository contains a HuggingFace `transformers` implementation of the Florence-2 model from Microsoft.

Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks. Florence-2 can interpret simple text prompts to perform tasks like captioning, object detection, and segmentation. It leverages our FLD-5B dataset, containing 5.4 billion annotations across 126 million images, to master multi-task learning. The model's sequence-to-sequence architecture enables it to excel in both zero-shot and fine-tuned settings, proving to be a competitive vision foundation model.

Resources and Technical Documentation:
+ [Florence-2 technical report](https://arxiv.org/abs/2311.06242).
+ [Jupyter Notebook for inference and visualization of Florence-2-large model](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb)

| Model | Model size | Model Description |
| ------- | ------------- | ------------- |
| Florence-2-base[[HF]](https://huggingface.co/microsoft/Florence-2-base) | 0.23B | Pretrained model with FLD-5B
| Florence-2-large[[HF]](https://huggingface.co/microsoft/Florence-2-large) | 0.77B | Pretrained model with FLD-5B
| Florence-2-base-ft[[HF]](https://huggingface.co/microsoft/Florence-2-base-ft) | 0.23B | Finetuned model on a collection of downstream tasks
| Florence-2-large-ft[[HF]](https://huggingface.co/microsoft/Florence-2-large-ft) | 0.77B | Finetuned model on a collection of downstream tasks

## How to Get Started with the Model

Use the code below to get started with the model. All models are trained with float16.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-base-ft", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)

prompt = "<OD>"

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)

generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=1024,
    do_sample=False,
    num_beams=3
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]

parsed_answer = processor.post_process_generation(generated_text, task="<OD>", image_size=(image.width, image.height))

print(parsed_answer)
```

## Tasks

This model is capable of performing different tasks through changing the prompts.

First, let's define a function to run a prompt.
<details>
<summary> Click to expand </summary>

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-base-ft", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)

def run_example(task_prompt, text_input=None):
    if text_input is None:
        prompt = task_prompt
    else:
        prompt = task_prompt + text_input
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
    print(parsed_answer)
```
</details>

Here are the tasks `Florence-2` could perform:

<details>
<summary> Click to expand </summary>

### Caption

```python
prompt = "<CAPTION>"
run_example(prompt)
```

### Detailed Caption

```python
prompt = "<DETAILED_CAPTION>"
run_example(prompt)
```

### More Detailed Caption

```python
prompt = "<MORE_DETAILED_CAPTION>"
run_example(prompt)
```

### Caption to Phrase Grounding

The caption to phrase grounding task requires additional text input, i.e. the caption.

Caption to phrase grounding results format: {'\<CAPTION_TO_PHRASE_GROUNDING>': {'bboxes': [[x1, y1, x2, y2], ...], 'labels': ['', '', ...]}}

```python
task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
results = run_example(task_prompt, text_input="A green car parked in front of a yellow building.")
```

### Object Detection

OD results format: {'\<OD>': {'bboxes': [[x1, y1, x2, y2], ...], 'labels': ['label1', 'label2', ...]} }

```python
prompt = "<OD>"
run_example(prompt)
```

### Dense Region Caption

Dense region caption results format: {'\<DENSE_REGION_CAPTION>' : {'bboxes': [[x1, y1, x2, y2], ...], 'labels': ['label1', 'label2', ...]} }

```python
prompt = "<DENSE_REGION_CAPTION>"
run_example(prompt)
```

### Region proposal

Region proposal results format: {'\<REGION_PROPOSAL>': {'bboxes': [[x1, y1, x2, y2], ...], 'labels': ['', '', ...]}}

```python
prompt = "<REGION_PROPOSAL>"
run_example(prompt)
```

### OCR

```python
prompt = "<OCR>"
run_example(prompt)
```

### OCR with Region

OCR with region output format: {'\<OCR_WITH_REGION>': {'quad_boxes': [[x1, y1, x2, y2, x3, y3, x4, y4], ...], 'labels': ['text1', ...]}}

```python
prompt = "<OCR_WITH_REGION>"
run_example(prompt)
```

For more detailed examples, please refer to the [notebook](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb)

</details>

# Benchmarks

## Florence-2 Zero-shot performance

The following table presents the zero-shot performance of generalist vision foundation models on image captioning and object detection evaluation tasks. These models have not been exposed to the training data of the evaluation tasks during their training phase.

| Method | #params | COCO Cap. test CIDEr | NoCaps val CIDEr | TextCaps val CIDEr | COCO Det. val2017 mAP |
|--------|---------|----------------------|------------------|--------------------|-----------------------|
| Flamingo | 80B | 84.3 | - | - | - |
| Florence-2-base | 0.23B | 133.0 | 118.7 | 70.1 | 34.7 |
| Florence-2-large | 0.77B | 135.6 | 120.8 | 72.8 | 37.5 |

The following table continues the comparison with performance on other vision-language evaluation tasks.

| Method | Flickr30k test R@1 | Refcoco val Accuracy | Refcoco test-A Accuracy | Refcoco test-B Accuracy | Refcoco+ val Accuracy | Refcoco+ test-A Accuracy | Refcoco+ test-B Accuracy | Refcocog val Accuracy | Refcocog test Accuracy | Refcoco RES val mIoU |
|--------|----------------------|----------------------|-------------------------|-------------------------|-----------------------|--------------------------|--------------------------|-----------------------|------------------------|----------------------|
| Kosmos-2 | 78.7 | 52.3 | 57.4 | 47.3 | 45.5 | 50.7 | 42.2 | 60.6 | 61.7 | - |
| Florence-2-base | 83.6 | 53.9 | 58.4 | 49.7 | 51.5 | 56.4 | 47.9 | 66.3 | 65.1 | 34.6 |
| Florence-2-large | 84.4 | 56.3 | 61.6 | 51.4 | 53.6 | 57.9 | 49.9 | 68.0 | 67.0 | 35.8 |

## Florence-2 finetuned performance

We finetune Florence-2 models with a collection of downstream tasks, resulting in two generalist models *Florence-2-base-ft* and *Florence-2-large-ft* that can conduct a wide range of downstream tasks.

The table below compares the performance of specialist and generalist models on various captioning and Visual Question Answering (VQA) tasks. Specialist models are fine-tuned specifically for each task, whereas generalist models are fine-tuned in a task-agnostic manner across all tasks. The symbol "▲" indicates the usage of external OCR as input.

| Method | # Params | COCO Caption Karpathy test CIDEr | NoCaps val CIDEr | TextCaps val CIDEr | VQAv2 test-dev Acc | TextVQA test-dev Acc | VizWiz VQA test-dev Acc |
|----------------|----------|-----------------------------------|------------------|--------------------|--------------------|----------------------|-------------------------|
| **Specialist Models** | | | | | | | |
| CoCa | 2.1B | 143.6 | 122.4 | - | 82.3 | - | - |
| BLIP-2 | 7.8B | 144.5 | 121.6 | - | 82.2 | - | - |
| GIT2 | 5.1B | 145.0 | 126.9 | 148.6 | 81.7 | 67.3 | 71.0 |
| Flamingo | 80B | 138.1 | - | - | 82.0 | 54.1 | 65.7 |
| PaLI | 17B | 149.1 | 127.0 | 160.0▲ | 84.3 | 58.8 / 73.1▲ | 71.6 / 74.4▲ |
| PaLI-X | 55B | 149.2 | 126.3 | 147.0 / 163.7▲ | 86.0 | 71.4 / 80.8▲ | 70.9 / 74.6▲ |
| **Generalist Models** | | | | | | | |
| Unified-IO | 2.9B | - | 100.0 | - | 77.9 | - | 57.4 |
| Florence-2-base-ft | 0.23B | 140.0 | 116.7 | 143.9 | 79.7 | 63.6 | 63.6 |
| Florence-2-large-ft | 0.77B | 143.3 | 124.9 | 151.1 | 81.7 | 73.5 | 72.6 |
| Method | # Params | COCO Det. val2017 mAP | Flickr30k test R@1 | RefCOCO val Accuracy | RefCOCO test-A Accuracy | RefCOCO test-B Accuracy | RefCOCO+ val Accuracy | RefCOCO+ test-A Accuracy | RefCOCO+ test-B Accuracy | RefCOCOg val Accuracy | RefCOCOg test Accuracy | RefCOCO RES val mIoU |
|----------------------|----------|-----------------------|--------------------|----------------------|-------------------------|-------------------------|------------------------|---------------------------|---------------------------|------------------------|-----------------------|------------------------|
| **Specialist Models** | | | | | | | | | | | | |
| SeqTR | - | - | - | 83.7 | 86.5 | 81.2 | 71.5 | 76.3 | 64.9 | 74.9 | 74.2 | - |
| PolyFormer | - | - | - | 90.4 | 92.9 | 87.2 | 85.0 | 89.8 | 78.0 | 85.8 | 85.9 | 76.9 |
| UNINEXT | 0.74B | 60.6 | - | 92.6 | 94.3 | 91.5 | 85.2 | 89.6 | 79.8 | 88.7 | 89.4 | - |
| Ferret | 13B | - | - | 89.5 | 92.4 | 84.4 | 82.8 | 88.1 | 75.2 | 85.8 | 86.3 | - |
| **Generalist Models** | | | | | | | | | | | | |
| UniTAB | - | - | - | 88.6 | 91.1 | 83.8 | 81.0 | 85.4 | 71.6 | 84.6 | 84.7 | - |
| Florence-2-base-ft | 0.23B | 41.4 | 84.0 | 92.6 | 94.8 | 91.5 | 86.8 | 91.7 | 82.2 | 89.8 | 82.2 | 78.0 |
| Florence-2-large-ft | 0.77B | 43.4 | 85.2 | 93.4 | 95.3 | 92.0 | 88.3 | 92.9 | 83.6 | 91.2 | 91.7 | 80.5 |

## BibTex and citation info

```
@article{xiao2023florence,
  title={Florence-2: Advancing a unified representation for a variety of vision tasks},
  author={Xiao, Bin and Wu, Haiping and Xu, Weijian and Dai, Xiyang and Hu, Houdong and Lu, Yumao and Zeng, Michael and Liu, Ce and Yuan, Lu},
  journal={arXiv preprint arXiv:2311.06242},
  year={2023}
}
```
Qwen/Qwen2-1.5B
Qwen
"2024-06-06T14:36:18Z"
84,353
76
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "pretrained", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-05-31T08:41:32Z"
---
language:
- en
pipeline_tag: text-generation
tags:
- pretrained
license: apache-2.0
---

# Qwen2-1.5B

## Introduction

Qwen2 is the new series of Qwen large language models. For Qwen2, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters, including a Mixture-of-Experts model. This repo contains the 1.5B Qwen2 base language model.

Compared with state-of-the-art open-source language models, including the previously released Qwen1.5, Qwen2 has generally surpassed most open-source models and demonstrated competitiveness against proprietary models across a series of benchmarks targeting language understanding, language generation, multilingual capability, coding, mathematics, reasoning, etc.

For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2/), [GitHub](https://github.com/QwenLM/Qwen2), and [Documentation](https://qwen.readthedocs.io/en/latest/).
<br>

## Model Details

Qwen2 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes.

## Requirements

The code for Qwen2 has been merged into the latest Hugging Face `transformers`, and we advise you to install `transformers>=4.37.0`, or you might encounter the following error:
```
KeyError: 'qwen2'
```

## Usage

We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.

## Performance

The evaluation of base models mainly focuses on the model performance of natural language understanding, general question answering, coding, mathematics, scientific knowledge, reasoning, multilingual capability, etc.
The datasets for evaluation include:

**English Tasks**: MMLU (5-shot), MMLU-Pro (5-shot), GPQA (5-shot), Theorem QA (5-shot), BBH (3-shot), HellaSwag (10-shot), Winogrande (5-shot), TruthfulQA (0-shot), ARC-C (25-shot)

**Coding Tasks**: EvalPlus (0-shot) (HumanEval, MBPP, HumanEval+, MBPP+), MultiPL-E (0-shot) (Python, C++, JAVA, PHP, TypeScript, C#, Bash, JavaScript)

**Math Tasks**: GSM8K (4-shot), MATH (4-shot)

**Chinese Tasks**: C-Eval (5-shot), CMMLU (5-shot)

**Multilingual Tasks**: Multi-Exam (M3Exam 5-shot, IndoMMLU 3-shot, ruMMLU 5-shot, mMMLU 5-shot), Multi-Understanding (BELEBELE 5-shot, XCOPA 5-shot, XWinograd 5-shot, XStoryCloze 0-shot, PAWS-X 5-shot), Multi-Mathematics (MGSM 8-shot), Multi-Translation (Flores-101 5-shot)

#### Qwen2-0.5B & Qwen2-1.5B performances

| Datasets | Phi-2 | Gemma-2B | MiniCPM | Qwen1.5-1.8B | Qwen2-0.5B | Qwen2-1.5B |
| :--------| :---------: | :------------: | :------------: |:------------: | :------------: | :------------: |
| #Non-Emb Params | 2.5B | 2.0B | 2.4B | 1.3B | 0.35B | 1.3B |
| MMLU | 52.7 | 42.3 | 53.5 | 46.8 | 45.4 | **56.5** |
| MMLU-Pro | - | 15.9 | - | - | 14.7 | 21.8 |
| Theorem QA | - | - | - | - | 8.9 | **15.0** |
| HumanEval | 47.6 | 22.0 | **50.0** | 20.1 | 22.0 | 31.1 |
| MBPP | **55.0** | 29.2 | 47.3 | 18.0 | 22.0 | 37.4 |
| GSM8K | 57.2 | 17.7 | 53.8 | 38.4 | 36.5 | **58.5** |
| MATH | 3.5 | 11.8 | 10.2 | 10.1 | 10.7 | **21.7** |
| BBH | **43.4** | 35.2 | 36.9 | 24.2 | 28.4 | 37.2 |
| HellaSwag | **73.1** | 71.4 | 68.3 | 61.4 | 49.3 | 66.6 |
| Winogrande | **74.4** | 66.8 | - | 60.3 | 56.8 | 66.2 |
| ARC-C | **61.1** | 48.5 | - | 37.9 | 31.5 | 43.9 |
| TruthfulQA | 44.5 | 33.1 | - | 39.4 | 39.7 | **45.9** |
| C-Eval | 23.4 | 28.0 | 51.1 | 59.7 | 58.2 | **70.6** |
| CMMLU | 24.2 | - | 51.1 | 57.8 | 55.1 | **70.3** |

## Citation

If you find our work helpful, feel free to give us a cite.

```
@article{qwen2,
  title={Qwen2 Technical Report},
  year={2024}
}
```
sileod/deberta-v3-base-tasksource-nli
sileod
"2024-08-13T21:12:51Z"
84,319
118
transformers
[ "transformers", "pytorch", "safetensors", "deberta-v2", "text-classification", "deberta-v3-base", "deberta-v3", "deberta", "nli", "natural-language-inference", "multitask", "multi-task", "pipeline", "extreme-multi-task", "extreme-mtl", "tasksource", "zero-shot", "rlhf", "zero-shot-classification", "en", "dataset:glue", "dataset:nyu-mll/multi_nli", "dataset:multi_nli", "dataset:super_glue", "dataset:anli", "dataset:tasksource/babi_nli", "dataset:sick", "dataset:snli", "dataset:scitail", "dataset:OpenAssistant/oasst1", "dataset:universal_dependencies", "dataset:hans", "dataset:qbao775/PARARULE-Plus", "dataset:alisawuffles/WANLI", "dataset:metaeval/recast", "dataset:sileod/probability_words_nli", "dataset:joey234/nan-nli", "dataset:pietrolesci/nli_fever", "dataset:pietrolesci/breaking_nli", "dataset:pietrolesci/conj_nli", "dataset:pietrolesci/fracas", "dataset:pietrolesci/dialogue_nli", "dataset:pietrolesci/mpe", "dataset:pietrolesci/dnc", "dataset:pietrolesci/gpt3_nli", "dataset:pietrolesci/recast_white", "dataset:pietrolesci/joci", "dataset:martn-nguyen/contrast_nli", "dataset:pietrolesci/robust_nli", "dataset:pietrolesci/robust_nli_is_sd", "dataset:pietrolesci/robust_nli_li_ts", "dataset:pietrolesci/gen_debiased_nli", "dataset:pietrolesci/add_one_rte", "dataset:metaeval/imppres", "dataset:pietrolesci/glue_diagnostics", "dataset:hlgd", "dataset:PolyAI/banking77", "dataset:paws", "dataset:quora", "dataset:medical_questions_pairs", "dataset:conll2003", "dataset:nlpaueb/finer-139", "dataset:Anthropic/hh-rlhf", "dataset:Anthropic/model-written-evals", "dataset:truthful_qa", "dataset:nightingal3/fig-qa", "dataset:tasksource/bigbench", "dataset:blimp", "dataset:cos_e", "dataset:cosmos_qa", "dataset:dream", "dataset:openbookqa", "dataset:qasc", "dataset:quartz", "dataset:quail", "dataset:head_qa", "dataset:sciq", "dataset:social_i_qa", "dataset:wiki_hop", "dataset:wiqa", "dataset:piqa", "dataset:hellaswag", "dataset:pkavumba/balanced-copa", "dataset:12ml/e-CARE", "dataset:art", "dataset:tasksource/mmlu", "dataset:winogrande", "dataset:codah", "dataset:ai2_arc", "dataset:definite_pronoun_resolution", "dataset:swag", "dataset:math_qa", "dataset:metaeval/utilitarianism", "dataset:mteb/amazon_counterfactual", "dataset:SetFit/insincere-questions", "dataset:SetFit/toxic_conversations", "dataset:turingbench/TuringBench", "dataset:trec", "dataset:tals/vitaminc", "dataset:hope_edi", "dataset:strombergnlp/rumoureval_2019", "dataset:ethos", "dataset:tweet_eval", "dataset:discovery", "dataset:pragmeval", "dataset:silicone", "dataset:lex_glue", "dataset:papluca/language-identification", "dataset:imdb", "dataset:rotten_tomatoes", "dataset:ag_news", "dataset:yelp_review_full", "dataset:financial_phrasebank", "dataset:poem_sentiment", "dataset:dbpedia_14", "dataset:amazon_polarity", "dataset:app_reviews", "dataset:hate_speech18", "dataset:sms_spam", "dataset:humicroedit", "dataset:snips_built_in_intents", "dataset:banking77", "dataset:hate_speech_offensive", "dataset:yahoo_answers_topics", "dataset:pacovaldez/stackoverflow-questions", "dataset:zapsdcn/hyperpartisan_news", "dataset:zapsdcn/sciie", "dataset:zapsdcn/citation_intent", "dataset:go_emotions", "dataset:allenai/scicite", "dataset:liar", "dataset:relbert/lexical_relation_classification", "dataset:metaeval/linguisticprobing", "dataset:tasksource/crowdflower", "dataset:metaeval/ethics", "dataset:emo", "dataset:google_wellformed_query", "dataset:tweets_hate_speech_detection", "dataset:has_part", "dataset:wnut_17", "dataset:ncbi_disease", 
"dataset:acronym_identification", "dataset:jnlpba", "dataset:species_800", "dataset:SpeedOfMagic/ontonotes_english", "dataset:blog_authorship_corpus", "dataset:launch/open_question_type", "dataset:health_fact", "dataset:commonsense_qa", "dataset:mc_taco", "dataset:ade_corpus_v2", "dataset:prajjwal1/discosense", "dataset:circa", "dataset:PiC/phrase_similarity", "dataset:copenlu/scientific-exaggeration-detection", "dataset:quarel", "dataset:mwong/fever-evidence-related", "dataset:numer_sense", "dataset:dynabench/dynasent", "dataset:raquiba/Sarcasm_News_Headline", "dataset:sem_eval_2010_task_8", "dataset:demo-org/auditor_review", "dataset:medmcqa", "dataset:aqua_rat", "dataset:RuyuanWan/Dynasent_Disagreement", "dataset:RuyuanWan/Politeness_Disagreement", "dataset:RuyuanWan/SBIC_Disagreement", "dataset:RuyuanWan/SChem_Disagreement", "dataset:RuyuanWan/Dilemmas_Disagreement", "dataset:lucasmccabe/logiqa", "dataset:wiki_qa", "dataset:metaeval/cycic_classification", "dataset:metaeval/cycic_multiplechoice", "dataset:metaeval/sts-companion", "dataset:metaeval/commonsense_qa_2.0", "dataset:metaeval/lingnli", "dataset:metaeval/monotonicity-entailment", "dataset:metaeval/arct", "dataset:metaeval/scinli", "dataset:metaeval/naturallogic", "dataset:onestop_qa", "dataset:demelin/moral_stories", "dataset:corypaik/prost", "dataset:aps/dynahate", "dataset:metaeval/syntactic-augmentation-nli", "dataset:metaeval/autotnli", "dataset:lasha-nlp/CONDAQA", "dataset:openai/webgpt_comparisons", "dataset:Dahoas/synthetic-instruct-gptj-pairwise", "dataset:metaeval/scruples", "dataset:metaeval/wouldyourather", "dataset:sileod/attempto-nli", "dataset:metaeval/defeasible-nli", "dataset:metaeval/help-nli", "dataset:metaeval/nli-veridicality-transitivity", "dataset:metaeval/natural-language-satisfiability", "dataset:metaeval/lonli", "dataset:tasksource/dadc-limit-nli", "dataset:ColumbiaNLP/FLUTE", "dataset:metaeval/strategy-qa", "dataset:openai/summarize_from_feedback", "dataset:tasksource/folio", "dataset:metaeval/tomi-nli", "dataset:metaeval/avicenna", "dataset:stanfordnlp/SHP", "dataset:GBaker/MedQA-USMLE-4-options-hf", "dataset:GBaker/MedQA-USMLE-4-options", "dataset:sileod/wikimedqa", "dataset:declare-lab/cicero", "dataset:amydeng2000/CREAK", "dataset:metaeval/mutual", "dataset:inverse-scaling/NeQA", "dataset:inverse-scaling/quote-repetition", "dataset:inverse-scaling/redefine-math", "dataset:tasksource/puzzte", "dataset:metaeval/implicatures", "dataset:race", "dataset:metaeval/spartqa-yn", "dataset:metaeval/spartqa-mchoice", "dataset:metaeval/temporal-nli", "dataset:metaeval/ScienceQA_text_only", "dataset:AndyChiang/cloth", "dataset:metaeval/logiqa-2.0-nli", "dataset:tasksource/oasst1_dense_flat", "dataset:metaeval/boolq-natural-perturbations", "dataset:metaeval/path-naturalness-prediction", "dataset:riddle_sense", "dataset:Jiangjie/ekar_english", "dataset:metaeval/implicit-hate-stg1", "dataset:metaeval/chaos-mnli-ambiguity", "dataset:IlyaGusev/headline_cause", "dataset:metaeval/race-c", "dataset:metaeval/equate", "dataset:metaeval/ambient", "dataset:AndyChiang/dgen", "dataset:metaeval/clcd-english", "dataset:civil_comments", "dataset:metaeval/acceptability-prediction", "dataset:maximedb/twentyquestions", "dataset:metaeval/counterfactually-augmented-snli", "dataset:tasksource/I2D2", "dataset:sileod/mindgames", "dataset:metaeval/counterfactually-augmented-imdb", "dataset:metaeval/cnli", "dataset:metaeval/reclor", "dataset:tasksource/oasst1_pairwise_rlhf_reward", "dataset:tasksource/zero-shot-label-nli", 
"dataset:webis/args_me", "dataset:webis/Touche23-ValueEval", "dataset:tasksource/starcon", "dataset:tasksource/ruletaker", "dataset:lighteval/lsat_qa", "dataset:tasksource/ConTRoL-nli", "dataset:tasksource/tracie", "dataset:tasksource/sherliic", "dataset:tasksource/sen-making", "dataset:tasksource/winowhy", "dataset:mediabiasgroup/mbib-base", "dataset:tasksource/robustLR", "dataset:CLUTRR/v1", "dataset:tasksource/logical-fallacy", "dataset:tasksource/parade", "dataset:tasksource/cladder", "dataset:tasksource/subjectivity", "dataset:tasksource/MOH", "dataset:tasksource/VUAC", "dataset:tasksource/TroFi", "dataset:sharc_modified", "dataset:tasksource/conceptrules_v2", "dataset:tasksource/disrpt", "dataset:conll2000", "dataset:DFKI-SLT/few-nerd", "dataset:tasksource/com2sense", "dataset:tasksource/scone", "dataset:tasksource/winodict", "dataset:tasksource/fool-me-twice", "dataset:tasksource/monli", "dataset:tasksource/corr2cause", "dataset:tasksource/apt", "dataset:zeroshot/twitter-financial-news-sentiment", "dataset:tasksource/icl-symbol-tuning-instruct", "dataset:tasksource/SpaceNLI", "dataset:sihaochen/propsegment", "dataset:HannahRoseKirk/HatemojiBuild", "dataset:tasksource/regset", "dataset:lmsys/chatbot_arena_conversations", "dataset:tasksource/nlgraph", "arxiv:2301.05948", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
zero-shot-classification
"2023-01-13T13:47:22Z"
--- license: apache-2.0 language: en tags: - deberta-v3-base - deberta-v3 - deberta - text-classification - nli - natural-language-inference - multitask - multi-task - pipeline - extreme-multi-task - extreme-mtl - tasksource - zero-shot - rlhf model-index: - name: deberta-v3-base-tasksource-nli results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: rte split: validation metrics: - type: accuracy value: 0.89 - task: type: natural-language-inference name: Natural Language Inference dataset: name: anli-r3 type: anli config: plain_text split: validation metrics: - type: accuracy value: 0.52 name: Accuracy datasets: - glue - nyu-mll/multi_nli - multi_nli - super_glue - anli - tasksource/babi_nli - sick - snli - scitail - OpenAssistant/oasst1 - universal_dependencies - hans - qbao775/PARARULE-Plus - alisawuffles/WANLI - metaeval/recast - sileod/probability_words_nli - joey234/nan-nli - pietrolesci/nli_fever - pietrolesci/breaking_nli - pietrolesci/conj_nli - pietrolesci/fracas - pietrolesci/dialogue_nli - pietrolesci/mpe - pietrolesci/dnc - pietrolesci/gpt3_nli - pietrolesci/recast_white - pietrolesci/joci - martn-nguyen/contrast_nli - pietrolesci/robust_nli - pietrolesci/robust_nli_is_sd - pietrolesci/robust_nli_li_ts - pietrolesci/gen_debiased_nli - pietrolesci/add_one_rte - metaeval/imppres - pietrolesci/glue_diagnostics - hlgd - PolyAI/banking77 - paws - quora - medical_questions_pairs - conll2003 - nlpaueb/finer-139 - Anthropic/hh-rlhf - Anthropic/model-written-evals - truthful_qa - nightingal3/fig-qa - tasksource/bigbench - blimp - cos_e - cosmos_qa - dream - openbookqa - qasc - quartz - quail - head_qa - sciq - social_i_qa - wiki_hop - wiqa - piqa - hellaswag - pkavumba/balanced-copa - 12ml/e-CARE - art - tasksource/mmlu - winogrande - codah - ai2_arc - definite_pronoun_resolution - swag - math_qa - metaeval/utilitarianism - mteb/amazon_counterfactual - SetFit/insincere-questions - SetFit/toxic_conversations - turingbench/TuringBench - trec - tals/vitaminc - hope_edi - strombergnlp/rumoureval_2019 - ethos - tweet_eval - discovery - pragmeval - silicone - lex_glue - papluca/language-identification - imdb - rotten_tomatoes - ag_news - yelp_review_full - financial_phrasebank - poem_sentiment - dbpedia_14 - amazon_polarity - app_reviews - hate_speech18 - sms_spam - humicroedit - snips_built_in_intents - banking77 - hate_speech_offensive - yahoo_answers_topics - pacovaldez/stackoverflow-questions - zapsdcn/hyperpartisan_news - zapsdcn/sciie - zapsdcn/citation_intent - go_emotions - allenai/scicite - liar - relbert/lexical_relation_classification - metaeval/linguisticprobing - tasksource/crowdflower - metaeval/ethics - emo - google_wellformed_query - tweets_hate_speech_detection - has_part - wnut_17 - ncbi_disease - acronym_identification - jnlpba - species_800 - SpeedOfMagic/ontonotes_english - blog_authorship_corpus - launch/open_question_type - health_fact - commonsense_qa - mc_taco - ade_corpus_v2 - prajjwal1/discosense - circa - PiC/phrase_similarity - copenlu/scientific-exaggeration-detection - quarel - mwong/fever-evidence-related - numer_sense - dynabench/dynasent - raquiba/Sarcasm_News_Headline - sem_eval_2010_task_8 - demo-org/auditor_review - medmcqa - aqua_rat - RuyuanWan/Dynasent_Disagreement - RuyuanWan/Politeness_Disagreement - RuyuanWan/SBIC_Disagreement - RuyuanWan/SChem_Disagreement - RuyuanWan/Dilemmas_Disagreement - lucasmccabe/logiqa - wiki_qa - metaeval/cycic_classification - metaeval/cycic_multiplechoice - 
metaeval/sts-companion - metaeval/commonsense_qa_2.0 - metaeval/lingnli - metaeval/monotonicity-entailment - metaeval/arct - metaeval/scinli - metaeval/naturallogic - onestop_qa - demelin/moral_stories - corypaik/prost - aps/dynahate - metaeval/syntactic-augmentation-nli - metaeval/autotnli - lasha-nlp/CONDAQA - openai/webgpt_comparisons - Dahoas/synthetic-instruct-gptj-pairwise - metaeval/scruples - metaeval/wouldyourather - sileod/attempto-nli - metaeval/defeasible-nli - metaeval/help-nli - metaeval/nli-veridicality-transitivity - metaeval/natural-language-satisfiability - metaeval/lonli - tasksource/dadc-limit-nli - ColumbiaNLP/FLUTE - metaeval/strategy-qa - openai/summarize_from_feedback - tasksource/folio - metaeval/tomi-nli - metaeval/avicenna - stanfordnlp/SHP - GBaker/MedQA-USMLE-4-options-hf - GBaker/MedQA-USMLE-4-options - sileod/wikimedqa - declare-lab/cicero - amydeng2000/CREAK - metaeval/mutual - inverse-scaling/NeQA - inverse-scaling/quote-repetition - inverse-scaling/redefine-math - tasksource/puzzte - metaeval/implicatures - race - metaeval/spartqa-yn - metaeval/spartqa-mchoice - metaeval/temporal-nli - metaeval/ScienceQA_text_only - AndyChiang/cloth - metaeval/logiqa-2.0-nli - tasksource/oasst1_dense_flat - metaeval/boolq-natural-perturbations - metaeval/path-naturalness-prediction - riddle_sense - Jiangjie/ekar_english - metaeval/implicit-hate-stg1 - metaeval/chaos-mnli-ambiguity - IlyaGusev/headline_cause - metaeval/race-c - metaeval/equate - metaeval/ambient - AndyChiang/dgen - metaeval/clcd-english - civil_comments - metaeval/acceptability-prediction - maximedb/twentyquestions - metaeval/counterfactually-augmented-snli - tasksource/I2D2 - sileod/mindgames - metaeval/counterfactually-augmented-imdb - metaeval/cnli - metaeval/reclor - tasksource/oasst1_pairwise_rlhf_reward - tasksource/zero-shot-label-nli - webis/args_me - webis/Touche23-ValueEval - tasksource/starcon - tasksource/ruletaker - lighteval/lsat_qa - tasksource/ConTRoL-nli - tasksource/tracie - tasksource/sherliic - tasksource/sen-making - tasksource/winowhy - mediabiasgroup/mbib-base - tasksource/robustLR - CLUTRR/v1 - tasksource/logical-fallacy - tasksource/parade - tasksource/cladder - tasksource/subjectivity - tasksource/MOH - tasksource/VUAC - tasksource/TroFi - sharc_modified - tasksource/conceptrules_v2 - tasksource/disrpt - conll2000 - DFKI-SLT/few-nerd - tasksource/com2sense - tasksource/scone - tasksource/winodict - tasksource/fool-me-twice - tasksource/monli - tasksource/corr2cause - tasksource/apt - zeroshot/twitter-financial-news-sentiment - tasksource/icl-symbol-tuning-instruct - tasksource/SpaceNLI - sihaochen/propsegment - HannahRoseKirk/HatemojiBuild - tasksource/regset - tasksource/babi_nli - lmsys/chatbot_arena_conversations - tasksource/nlgraph metrics: - accuracy library_name: transformers pipeline_tag: zero-shot-classification --- # Model Card for DeBERTa-v3-base-tasksource-nli --- **NOTE** Deprecated: use https://huggingface.co/tasksource/deberta-small-long-nli for longer context and better accuracy. --- This is [DeBERTa-v3-base](https://hf.co/microsoft/deberta-v3-base) fine-tuned with multi-task learning on 600+ tasks of the [tasksource collection](https://github.com/sileod/tasksource/). This checkpoint has strong zero-shot validation performance on many tasks (e.g. 70% on WNLI), and can be used for: - Zero-shot entailment-based classification for arbitrary labels [ZS]. - Natural language inference [NLI] - Hundreds of previous tasks with tasksource-adapters [TA]. 
- Further fine-tuning on a new task or tasksource task (classification, token classification or multiple-choice) [FT]. # [ZS] Zero-shot classification pipeline ```python from transformers import pipeline classifier = pipeline("zero-shot-classification",model="sileod/deberta-v3-base-tasksource-nli") text = "one day I will see the world" candidate_labels = ['travel', 'cooking', 'dancing'] classifier(text, candidate_labels) ``` NLI training data of this model includes [label-nli](https://huggingface.co/datasets/tasksource/zero-shot-label-nli), an NLI dataset specially constructed to improve this kind of zero-shot classification. (A manual sketch of this entailment trick is given at the end of this card.) # [NLI] Natural language inference pipeline ```python from transformers import pipeline pipe = pipeline("text-classification",model="sileod/deberta-v3-base-tasksource-nli") pipe([dict(text='there is a cat', text_pair='there is a black cat')]) #list of (premise,hypothesis) # [{'label': 'neutral', 'score': 0.9952911138534546}] ``` # [TA] Tasksource-adapters: 1 line access to hundreds of tasks ```python # !pip install tasknet import tasknet as tn pipe = tn.load_pipeline('sileod/deberta-v3-base-tasksource-nli','glue/sst2') # works for 500+ tasksource tasks pipe(['That movie was great !', 'Awful movie.']) # [{'label': 'positive', 'score': 0.9956}, {'label': 'negative', 'score': 0.9967}] ``` The list of tasks is available in the model's config.json. This is more efficient than ZS since it requires only one forward pass per example, but it is less flexible. # [FT] Tasknet: 3 lines fine-tuning ```python # !pip install tasknet import tasknet as tn hparams=dict(model_name='sileod/deberta-v3-base-tasksource-nli', learning_rate=2e-5) model, trainer = tn.Model_Trainer([tn.AutoTask("glue/rte")], hparams) trainer.train() ``` ## Evaluation This model ranked 1st among all models with the microsoft/deberta-v3-base architecture according to the IBM model recycling evaluation. https://ibm.github.io/model-recycling/ ### Software and training details The model was trained on 600 tasks for 200k steps with a batch size of 384 and a peak learning rate of 2e-5. Training took 15 days on an Nvidia A30 24GB GPU. This is the shared model with the MNLI classifier on top. Each task had a specific CLS embedding, which is dropped 10% of the time to facilitate model use without it. All multiple-choice models used the same classification layers. For classification tasks, models shared weights if their labels matched. https://github.com/sileod/tasksource/ \ https://github.com/sileod/tasknet/ \ Training code: https://colab.research.google.com/drive/1iB4Oxl9_B5W3ZDzXoWJN-olUbqLBxgQS?usp=sharing # Citation More details in this [article](https://arxiv.org/abs/2301.05948): ``` @article{sileo2023tasksource, title={tasksource: Structured Dataset Preprocessing Annotations for Frictionless Extreme Multi-Task Learning and Evaluation}, author={Sileo, Damien}, url= {https://arxiv.org/abs/2301.05948}, journal={arXiv preprint arXiv:2301.05948}, year={2023} } ``` # Model Card Contact damien.sileo@inria.fr
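As promised above, a manual sketch of the entailment trick behind the [ZS] pipeline: each candidate label is scored as an NLI hypothesis against the input text. This is a minimal illustration, not part of the original card; the hypothesis template and the `label2id` lookup for the entailment index are assumptions to verify against the model config.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

name = "sileod/deberta-v3-base-tasksource-nli"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name).eval()

text = "one day I will see the world"
labels = ["travel", "cooking", "dancing"]

# Each label becomes an NLI hypothesis; the input text is the premise.
hypotheses = [f"This example is about {label}." for label in labels]
inputs = tokenizer([text] * len(labels), hypotheses, padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Rank labels by entailment probability; the index is looked up, not hardcoded.
entail_idx = model.config.label2id.get("entailment", 0)
scores = logits.softmax(dim=-1)[:, entail_idx]
print(dict(zip(labels, scores.tolist())))
```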
dreamlike-art/dreamlike-photoreal-2.0
dreamlike-art
"2023-03-13T01:05:06Z"
84,308
1,676
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "photorealistic", "photoreal", "en", "license:other", "autotrain_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2023-01-04T03:01:40Z"
--- language: - en license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - photorealistic - photoreal - diffusers inference: false --- # Dreamlike Photoreal 2.0 is a photorealistic model based on Stable Diffusion 1.5, made by [dreamlike.art](https://dreamlike.art/). # If you want to use dreamlike models on your website/app/etc., check the license at the bottom first! Warning: This model is horny! Add "nude, naked" to the negative prompt if you want to avoid NSFW content. You can add **photo** to your prompt to make your gens look more photorealistic. Non-square aspect ratios work better for some prompts. If you want a portrait photo, try using a vertical aspect ratio. If you want a landscape photo, try using a horizontal aspect ratio. This model was trained on 768x768px images, so use 768x768px, 640x896px, 896x640px, etc. It also works pretty well with higher resolutions such as 768x1024px or 1024x768px. (A sketch showing how to pass these sizes explicitly is given at the end of this card.) ### Examples <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/preview1.jpg" style="max-width: 800px;" width="100%"/> <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/preview2.jpg" style="max-width: 800px;" width="100%"/> <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/preview3.jpg" style="max-width: 800px;" width="100%"/> ### dreamlike.art You can use this model for free on [dreamlike.art](https://dreamlike.art/)! <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/dreamlike.jpg" style="max-width: 1000px;" width="100%"/> ### CKPT [Download dreamlike-photoreal-2.0.ckpt (2.13GB)](https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/dreamlike-photoreal-2.0.ckpt) ### Safetensors [Download dreamlike-photoreal-2.0.safetensors (2.13GB)](https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/dreamlike-photoreal-2.0.safetensors) ### 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion Pipeline](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). ```python from diffusers import StableDiffusionPipeline import torch model_id = "dreamlike-art/dreamlike-photoreal-2.0" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "photo, a church in the middle of a field of crops, bright cinematic lighting, gopro, fisheye lens" image = pipe(prompt).images[0] image.save("./result.jpg") ``` <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/church.jpg" style="max-width: 640px;" width="100%"/> # License This model is licensed under a **modified** CreativeML OpenRAIL-M license. - **You are not allowed to host, finetune, or do inference with the model or its derivatives on websites/apps/etc. If you want to, please email us at contact@dreamlike.art** - **You are free to host the model card and files (Without any actual inference or finetuning) on both commercial and non-commercial websites/apps/etc. 
Please state the full model name (Dreamlike Photoreal 2.0) and include the license as well as a link to the model card (https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0)** - **You are free to use the outputs (images) of the model for commercial purposes in teams of 10 or less** - You can't use the model to deliberately produce or share illegal or harmful outputs or content - The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license - You may re-distribute the weights. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the **modified** CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here: https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/blob/main/LICENSE.md
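As referenced in the resolution advice above, a minimal sketch (the prompt and the 640x896 portrait size are illustrative) showing how to request the recommended non-square resolutions through the `height` and `width` arguments of the diffusers pipeline:

```python
from diffusers import StableDiffusionPipeline
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# Vertical (portrait) aspect ratio from the 768px training family: 640x896.
prompt = "photo, portrait of a woman on a rainy street, bright cinematic lighting"
image = pipe(prompt, height=896, width=640).images[0]
image.save("./portrait.jpg")
```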
TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ
TheBloke
"2023-12-14T14:30:44Z"
84,026
130
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "conversational", "fr", "it", "de", "es", "en", "base_model:mistralai/Mixtral-8x7B-Instruct-v0.1", "base_model:quantized:mistralai/Mixtral-8x7B-Instruct-v0.1", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
text-generation
"2023-12-11T18:49:53Z"
--- base_model: mistralai/Mixtral-8x7B-Instruct-v0.1 inference: false language: - fr - it - de - es - en license: apache-2.0 model_creator: Mistral AI_ model_name: Mixtral 8X7B Instruct v0.1 model_type: mixtral prompt_template: '[INST] {prompt} [/INST] ' quantized_by: TheBloke widget: - output: text: 'Arr, shiver me timbers! Ye have a llama on yer lawn, ye say? Well, that be a new one for me! Here''s what I''d suggest, arr: 1. Firstly, ensure yer safety. Llamas may look gentle, but they can be protective if they feel threatened. 2. Try to make the area less appealing to the llama. Remove any food sources or water that might be attracting it. 3. Contact local animal control or a wildlife rescue organization. They be the experts and can provide humane ways to remove the llama from yer property. 4. If ye have any experience with animals, you could try to gently herd the llama towards a nearby field or open space. But be careful, arr! Remember, arr, it be important to treat the llama with respect and care. It be a creature just trying to survive, like the rest of us.' text: '[INST] You are a pirate chatbot who always responds with Arr and pirate speak! There''s a llama on my lawn, how can I get rid of him? [/INST]' --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Mixtral 8X7B Instruct v0.1 - GPTQ - Model creator: [Mistral AI_](https://huggingface.co/mistralai) - Original model: [Mixtral 8X7B Instruct v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) <!-- description start --> # Description This repo contains GPTQ model files for [Mistral AI_'s Mixtral 8X7B Instruct v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1). Mixtral GPTQs currently require: * Transformers 4.36.0 or later * either, AutoGPTQ 0.6 compiled from source, or * Transformers 4.37.0.dev0 compiled from Github with: `pip3 install git+https://github.com/huggingface/transformers` Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. 
<!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF) * [Mistral AI_'s original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Mistral ``` [INST] {prompt} [/INST] ``` <!-- prompt-template end --> <!-- README_GPTQ.md-compatible clients start --> ## Known compatible clients / servers GPTQ models are currently supported on Linux (NVidia/AMD) and Windows (NVidia only). macOS users: please use GGUF models. Mixtral GPTQs currently have special requirements - see Description above. <!-- README_GPTQ.md-compatible clients end --> <!-- README_GPTQ.md-provided-files start --> ## Provided files, and GPTQ parameters Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. Most GPTQ files are made with AutoGPTQ. Mistral models are currently made with Transformers. <details> <summary>Explanation of GPTQ parameters</summary> - Bits: The bit size of the quantised model. - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. "None" is the lowest possible value. - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now. - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy. - GPTQ dataset: The calibration dataset used during quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ calibration dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s). - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences. - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama and Mistral models in 4-bit. </details> | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc | | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- | | [main](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/main) | 4 | None | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 23.81 GB | No | 4-bit, with Act Order. No group size, to lower VRAM requirements. 
| | [gptq-4bit-128g-actorder_True](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/gptq-4bit-128g-actorder_True) | 4 | 128 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 24.70 GB | No | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 27.42 GB | No | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | | [gptq-3bit--1g-actorder_True](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/gptq-3bit--1g-actorder_True) | 3 | None | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 18.01 GB | No | 3-bit, with Act Order and no group size. Lowest possible VRAM requirements. May be lower quality than 3-bit 128g. | | [gptq-3bit-128g-actorder_True](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/gptq-3bit-128g-actorder_True) | 3 | 128 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 18.85 GB | No | 3-bit, with group size 128g and act-order. Higher quality than 128g-False. | | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 47.04 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. | | [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 48.10 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. | <!-- README_GPTQ.md-provided-files end --> <!-- README_GPTQ.md-download-from-branches start --> ## How to download, including from branches ### In text-generation-webui To download from the `main` branch, enter `TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ` in the "Download model" box. 
To download from another branch, add `:branchname` to the end of the download name, e.g. `TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ:gptq-4bit-128g-actorder_True` ### From the command line I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` To download the `main` branch to a folder called `Mixtral-8x7B-Instruct-v0.1-GPTQ`: ```shell mkdir Mixtral-8x7B-Instruct-v0.1-GPTQ huggingface-cli download TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ --local-dir Mixtral-8x7B-Instruct-v0.1-GPTQ --local-dir-use-symlinks False ``` To download from a different branch, add the `--revision` parameter: ```shell mkdir Mixtral-8x7B-Instruct-v0.1-GPTQ huggingface-cli download TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ --revision gptq-4bit-128g-actorder_True --local-dir Mixtral-8x7B-Instruct-v0.1-GPTQ --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> If you remove the `--local-dir-use-symlinks False` parameter, the files will instead be stored in the central Hugging Face cache directory (default location on Linux is: `~/.cache/huggingface`), and symlinks will be added to the specified `--local-dir`, pointing to their real location in the cache. This allows for interrupted downloads to be resumed, and allows you to quickly clone the repo to multiple places on disk without triggering a download again. The downside, and the reason why I don't list that as the default option, is that the files are then hidden away in a cache folder and it's harder to know where your disk space is being used, and to clear it up if/when you want to remove a downloaded model. The cache location can be changed with the `HF_HOME` environment variable, and/or the `--cache-dir` parameter to `huggingface-cli`. For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell mkdir Mixtral-8x7B-Instruct-v0.1-GPTQ HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ --local-dir Mixtral-8x7B-Instruct-v0.1-GPTQ --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> ### With `git` (**not** recommended) To clone a specific branch with `git`, use a command like this: ```shell git clone --single-branch --branch gptq-4bit-128g-actorder_True https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ ``` Note that using Git with HF repos is strongly discouraged. It will be much slower than using `huggingface-hub`, and will use twice as much disk space as it has to store the model files twice (it stores every byte both in the intended target folder, and again in the `.git` folder as a blob.) 
<!-- README_GPTQ.md-download-from-branches end --> <!-- README_GPTQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui) **NOTE**: Requires: * Transformers 4.36.0, or Transformers 4.37.0.dev0 from Github * Either AutoGPTQ 0.6 compiled from source and `Loader: AutoGPTQ`, * or, `Loader: Transformers`, if you installed Transformers from Github: `pip3 install git+https://github.com/huggingface/transformers` Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ`. - To download from a specific branch, enter for example `TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ:gptq-4bit-128g-actorder_True` - see Provided Files above for the list of branches for each option. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `Mixtral-8x7B-Instruct-v0.1-GPTQ` 7. The model will automatically load, and is now ready for use! 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. - Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`. 9. Once you're ready, click the **Text Generation** tab and enter a prompt to get started! <!-- README_GPTQ.md-text-generation-webui end --> <!-- README_GPTQ.md-use-from-tgi start --> ## Serving this model from Text Generation Inference (TGI) Not currently supported for Mixtral models. <!-- README_GPTQ.md-use-from-tgi end --> <!-- README_GPTQ.md-use-from-python start --> ## Python code example: inference from this GPTQ model ### Install the necessary packages Requires: Transformers 4.37.0.dev0 from Github, Optimum 1.16.0 or later, and AutoGPTQ 0.5.1 or later. ```shell pip3 install --upgrade "git+https://github.com/huggingface/transformers" optimum # If using PyTorch 2.1 + CUDA 12.x: pip3 install --upgrade auto-gptq # or, if using PyTorch 2.1 + CUDA 11.x: pip3 install --upgrade auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ ``` If you are using PyTorch 2.0, you will need to install AutoGPTQ from source. Likewise if you have problems with the pre-built wheels, you should try building from source: ```shell pip3 uninstall -y auto-gptq git clone https://github.com/PanQiWei/AutoGPTQ cd AutoGPTQ DISABLE_QIGEN=1 pip3 install . 
``` ### Example Python code ```python from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_name_or_path = "TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ" # To use a different branch, change revision # For example: revision="gptq-4bit-128g-actorder_True" model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto", trust_remote_code=False, revision="main") tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True) prompt = "Write a story about llamas" system_message = "You are a story writing assistant" prompt_template=f'''[INST] {prompt} [/INST] ''' print("\n\n*** Generate:") input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda() output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512) print(tokenizer.decode(output[0])) # Inference can also be done using transformers' pipeline print("*** Pipeline:") pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1 ) print(pipe(prompt_template)[0]['generated_text']) ``` <!-- README_GPTQ.md-use-from-python end --> <!-- README_GPTQ.md-compatibility start --> ## Compatibility The files provided are tested to work with AutoGPTQ 0.6 (compiled from source) and Transformers 4.37.0 (installed from Github). <!-- README_GPTQ.md-compatibility end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> # Original model card: Mistral AI_'s Mixtral 8X7B Instruct v0.1 # Model Card for Mixtral-8x7B The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms Llama 2 70B on most benchmarks we tested. For full details of this model please read our [release blog post](https://mistral.ai/news/mixtral-of-experts/). ## Warning This repo contains weights that are compatible with [vLLM](https://github.com/vllm-project/vllm) serving of the model as well as Hugging Face [transformers](https://github.com/huggingface/transformers) library. It is based on the original Mixtral [torrent release](magnet:?xt=urn:btih:5546272da9065eddeb6fcd7ffddeef5b75be79a7&dn=mixtral-8x7b-32kseqlen&tr=udp%3A%2F%http://2Fopentracker.i2p.rocks%3A6969%2Fannounce&tr=http%3A%2F%http://2Ftracker.openbittorrent.com%3A80%2Fannounce), but the file format and parameter names are different. Please note that the model cannot (yet) be instantiated with HF. ## Instruction format This format must be strictly respected, otherwise the model will generate sub-optimal outputs. The template used to build a prompt for the Instruct model is defined as follows: ``` <s> [INST] Instruction [/INST] Model answer</s> [INST] Follow-up instruction [/INST] ``` Note that `<s>` and `</s>` are special tokens for beginning of string (BOS) and end of string (EOS) while [INST] and [/INST] are regular strings. 
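Recent `transformers` releases can also build this format for you through the tokenizer's chat template; a minimal sketch (assuming the HF-format repo ships a chat template, and with illustrative messages):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")

messages = [
    {"role": "user", "content": "What is your favourite condiment?"},
    {"role": "assistant", "content": "A good squeeze of fresh lemon juice."},
    {"role": "user", "content": "Do you have mayonnaise recipes?"},
]

# Produces the "<s> [INST] ... [/INST] ... </s> [INST] ... [/INST]" token ids,
# handling the BOS/EOS special tokens exactly as described above.
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt")
```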
As reference, here is the pseudo-code used to tokenize instructions during fine-tuning: ```python def tokenize(text): return tok.encode(text, add_special_tokens=False) [BOS_ID] + tokenize("[INST]") + tokenize(USER_MESSAGE_1) + tokenize("[/INST]") + tokenize(BOT_MESSAGE_1) + [EOS_ID] + … tokenize("[INST]") + tokenize(USER_MESSAGE_N) + tokenize("[/INST]") + tokenize(BOT_MESSAGE_N) + [EOS_ID] ``` In the pseudo-code above, note that the `tokenize` method should not add a BOS or EOS token automatically, but should add a prefix space. ## Run the model ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) text = "Hello my name is" inputs = tokenizer(text, return_tensors="pt") outputs = model.generate(**inputs, max_new_tokens=20) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` By default, transformers will load the model in full precision. Therefore you might be interested in further reducing the memory requirements to run the model through the optimizations offered in the HF ecosystem: ### In half-precision Note `float16` precision only works on GPU devices <details> <summary> Click to expand </summary> ```diff + import torch from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1" tokenizer = AutoTokenizer.from_pretrained(model_id) + model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to(0) text = "Hello my name is" + inputs = tokenizer(text, return_tensors="pt").to(0) outputs = model.generate(**inputs, max_new_tokens=20) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` </details> ### Lower precision (8-bit & 4-bit) using `bitsandbytes` <details> <summary> Click to expand </summary> ```diff + import torch from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1" tokenizer = AutoTokenizer.from_pretrained(model_id) + model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True) text = "Hello my name is" + inputs = tokenizer(text, return_tensors="pt").to(0) outputs = model.generate(**inputs, max_new_tokens=20) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` </details> ### Load the model with Flash Attention 2 <details> <summary> Click to expand </summary> ```diff + import torch from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1" tokenizer = AutoTokenizer.from_pretrained(model_id) + model = AutoModelForCausalLM.from_pretrained(model_id, use_flash_attention_2=True) text = "Hello my name is" + inputs = tokenizer(text, return_tensors="pt").to(0) outputs = model.generate(**inputs, max_new_tokens=20) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` </details> ## Limitations The Mixtral-8x7B Instruct model is a quick demonstration that the base model can be easily fine-tuned to achieve compelling performance. It does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs. 
# The Mistral AI Team Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Lélio Renard Lavaud, Louis Ternon, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Théophile Gervet, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
timm/mobilenetv2_100.ra_in1k
timm
"2023-04-27T21:14:13Z"
83,816
1
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2110.00476", "arxiv:1801.04381", "license:apache-2.0", "region:us" ]
image-classification
"2022-12-13T00:00:26Z"
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for mobilenetv2_100.ra_in1k A MobileNet-v2 image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below. Recipe details: * RandAugment `RA` recipe. Inspired by and evolved from EfficientNet RandAugment recipes. Published as `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). * RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging * Step (exponential decay w/ staircase) LR schedule with warmup ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 3.5 - GMACs: 0.3 - Activations (M): 6.7 - Image size: 224 x 224 - **Papers:** - MobileNetV2: Inverted Residuals and Linear Bottlenecks: https://arxiv.org/abs/1801.04381 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('mobilenetv2_100.ra_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'mobilenetv2_100.ra_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 16, 112, 112]) # torch.Size([1, 24, 56, 56]) # torch.Size([1, 32, 28, 28]) # torch.Size([1, 96, 14, 14]) # torch.Size([1, 320, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'mobilenetv2_100.ra_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1280, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{sandler2018mobilenetv2, title={Mobilenetv2: Inverted residuals and linear bottlenecks}, author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, pages={4510--4520}, year={2018} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
tiiuae/falcon-7b
tiiuae
"2024-09-28T07:06:56Z"
83,687
1,072
transformers
[ "transformers", "pytorch", "falcon", "text-generation", "custom_code", "en", "dataset:tiiuae/falcon-refinedweb", "arxiv:2205.14135", "arxiv:1911.02150", "arxiv:2101.00027", "arxiv:2005.14165", "arxiv:2104.09864", "arxiv:2306.01116", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-04-24T16:36:24Z"
--- datasets: - tiiuae/falcon-refinedweb language: - en inference: false license: apache-2.0 new_version: tiiuae/falcon-11B --- # 🚀 Falcon-7B **Falcon-7B is a 7B parameters causal decoder-only model built by [TII](https://www.tii.ae) and trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. It is made available under the Apache 2.0 license.** *Paper coming soon* 😊. 🤗 To get started with Falcon (inference, finetuning, quantization, etc.), we recommend reading [this great blogpost from HF](https://huggingface.co/blog/falcon)! ## Why use Falcon-7B? * **It outperforms comparable open-source models** (e.g., [MPT-7B](https://huggingface.co/mosaicml/mpt-7b), [StableLM](https://github.com/Stability-AI/StableLM), [RedPajama](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1) etc.), thanks to being trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). * **It features an architecture optimized for inference**, with FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)) and multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)). * **It is made available under a permissive Apache 2.0 license allowing for commercial use**, without any royalties or restrictions. ⚠️ **This is a raw, pretrained model, which should be further finetuned for most usecases.** If you are looking for a version better suited to taking generic instructions in a chat format, we recommend taking a look at [Falcon-7B-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct). 🔥 **Looking for an even more powerful model?** [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b) is Falcon-7B's big brother! ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` 💥 **Falcon LLMs require PyTorch 2.0 for use with `transformers`!** For fast inference with Falcon, check out [Text Generation Inference](https://github.com/huggingface/text-generation-inference)! Read more in this [blogpost](https://huggingface.co/blog/falcon). You will need **at least 16GB of memory** to swiftly run inference with Falcon-7B. # Model Card for Falcon-7B ## Model Details ### Model Description - **Developed by:** [https://www.tii.ae](https://www.tii.ae); - **Model type:** Causal decoder-only; - **Language(s) (NLP):** English, German, Spanish, French (and limited capabilities in Italian, Portuguese, Polish, Dutch, Romanian, Czech, Swedish); - **License:** Apache 2.0. ### Model Source - **Paper:** *coming soon*. 
## Uses ### Direct Use Research on large language models; as a foundation for further specialization and finetuning for specific usecases (e.g., summarization, text generation, chatbot, etc.) ### Out-of-Scope Use Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful. ## Bias, Risks, and Limitations Falcon-7B is trained on English and French data only, and will not generalize appropriately to other languages. Furthermore, as it is trained on large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online. ### Recommendations We recommend users of Falcon-7B to consider finetuning it for the specific set of tasks of interest, and for guardrails and appropriate precautions to be taken for any production use. ## How to Get Started with the Model ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## Training Details ### Training Data Falcon-7B was trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), a high-quality filtered and deduplicated web dataset which we enhanced with curated corpora. Significant components from our curated corpora were inspired by The Pile ([Gao et al., 2020](https://arxiv.org/abs/2101.00027)). | **Data source** | **Fraction** | **Tokens** | **Sources** | |--------------------|--------------|------------|-----------------------------------| | [RefinedWeb-English](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) | 79% | 1,185B | massive web crawl | | Books | 7% | 110B | | | Conversations | 6% | 85B | Reddit, StackOverflow, HackerNews | | Code | 3% | 45B | | | RefinedWeb-French | 3% | 45B | massive web crawl | | Technical | 2% | 30B | arXiv, PubMed, USPTO, etc. | The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) tokenizer. ### Training Procedure Falcon-7B was trained on 384 A100 40GB GPUs, using a 2D parallelism strategy (PP=2, DP=192) combined with ZeRO. #### Training Hyperparameters | **Hyperparameter** | **Value** | **Comment** | |--------------------|------------|-------------------------------------------| | Precision | `bfloat16` | | | Optimizer | AdamW | | | Learning rate | 6e-4 | 4B tokens warm-up, cosine decay to 1.2e-5 | | Weight decay | 1e-1 | | | Z-loss | 1e-4 | | | Batch size | 2304 | 30B tokens ramp-up | #### Speeds, Sizes, Times Training happened in early March 2023 and took about two weeks. ## Evaluation *Paper coming soon*. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for early results. 
## Technical Specifications ### Model Architecture and Objective Falcon-7B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token). The architecture is broadly adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), with the following differences: * **Positional embeddings:** rotary ([Su et al., 2021](https://arxiv.org/abs/2104.09864)); * **Attention:** multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)); * **Decoder-block:** parallel attention/MLP with a single layer norm. | **Hyperparameter** | **Value** | **Comment** | |--------------------|-----------|----------------------------------------| | Layers | 32 | | | `d_model` | 4544 | Increased to compensate for multiquery | | `head_dim` | 64 | Reduced to optimise for FlashAttention | | Vocabulary | 65024 | | | Sequence length | 2048 | | A toy sketch of the rotary rotation is given at the end of this card. ### Compute Infrastructure #### Hardware Falcon-7B was trained on AWS SageMaker, on 384 A100 40GB GPUs in P4d instances. #### Software Falcon-7B was trained on a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.) ## Citation *Paper coming soon* 😊. In the meantime, you can use the following information to cite: ``` @article{falcon40b, title={{Falcon-40B}: an open large language model with state-of-the-art performance}, author={Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme}, year={2023} } ``` To learn more about the pretraining dataset, see the 📓 [RefinedWeb paper](https://arxiv.org/abs/2306.01116). ``` @article{refinedweb, title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only}, author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay}, journal={arXiv preprint arXiv:2306.01116}, eprint={2306.01116}, eprinttype = {arXiv}, url={https://arxiv.org/abs/2306.01116}, year={2023} } ``` ## License Falcon-7B is made available under the Apache 2.0 license. ## Contact falconllm@tii.ae
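As mentioned in the architecture section above, here is a toy sketch of the rotary position embedding rotation ([Su et al., 2021](https://arxiv.org/abs/2104.09864)); this is illustrative PyTorch, not the model's actual fused kernel:

```python
import torch

def apply_rotary(x: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    # x: (batch, seq_len, n_heads, head_dim) query or key tensor.
    # Rotate each channel pair by a position-dependent angle.
    b, s, h, d = x.shape
    inv_freq = 1.0 / (base ** (torch.arange(0, d, 2, dtype=torch.float32) / d))
    angles = torch.outer(torch.arange(s, dtype=torch.float32), inv_freq)  # (s, d/2)
    cos = angles.cos()[None, :, None, :]
    sin = angles.sin()[None, :, None, :]
    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = torch.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin
    out[..., 1::2] = x1 * sin + x2 * cos
    return out
```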
cross-encoder/stsb-TinyBERT-L-4
cross-encoder
"2021-08-05T08:41:47Z"
83,591
3
transformers
[ "transformers", "pytorch", "jax", "bert", "text-classification", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
--- license: apache-2.0 --- # Cross-Encoder for Semantic Textual Similarity This model was trained using the [SentenceTransformers](https://sbert.net) [Cross-Encoder](https://www.sbert.net/examples/applications/cross-encoder/README.html) class. ## Training Data This model was trained on the [STS benchmark dataset](http://ixa2.si.ehu.eus/stswiki/index.php/STSbenchmark). The model will predict a score between 0 and 1 for the semantic similarity of two sentences. ## Usage and Performance Pre-trained models can be used like this: ``` from sentence_transformers import CrossEncoder model = CrossEncoder('cross-encoder/stsb-TinyBERT-L-4') scores = model.predict([('Sentence 1', 'Sentence 2'), ('Sentence 3', 'Sentence 4')]) ``` The model will predict scores for the pairs `('Sentence 1', 'Sentence 2')` and `('Sentence 3', 'Sentence 4')`. You can also use this model without sentence_transformers, using just the Transformers ``AutoModel`` class, as shown in the sketch below.
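A minimal sketch of that plain-Transformers route (standard-usage assumptions, not from the original card; `AutoModelForSequenceClassification` is used so the regression head is loaded, and the sigmoid mirroring CrossEncoder's default activation is an assumption to verify):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

name = "cross-encoder/stsb-TinyBERT-L-4"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name).eval()

# Tokenize sentence pairs together, as the cross-encoder expects.
features = tokenizer(
    ["A man is eating food.", "Two men are eating."],
    ["A man is eating pasta.", "Two men are having lunch."],
    padding=True, truncation=True, return_tensors="pt",
)
with torch.no_grad():
    # One regression logit per pair; sigmoid maps it to the 0..1 score range.
    scores = torch.sigmoid(model(**features).logits.squeeze(-1))
print(scores)
```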
distributed/optimized-gpt2-250m
distributed
"2024-10-08T09:55:52Z"
83,366
0
transformers
[ "transformers", "safetensors", "gpt_optimized", "text-generation", "custom_code", "arxiv:1910.09700", "autotrain_compatible", "region:us" ]
text-generation
"2024-10-01T11:21:14Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
tiiuae/falcon-40b-instruct
tiiuae
"2023-09-29T14:32:27Z"
83,141
1,172
transformers
[ "transformers", "pytorch", "falcon", "text-generation", "custom_code", "en", "dataset:tiiuae/falcon-refinedweb", "arxiv:2205.14135", "arxiv:1911.02150", "arxiv:2005.14165", "arxiv:2104.09864", "arxiv:2306.01116", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2023-05-25T10:14:36Z"
--- datasets: - tiiuae/falcon-refinedweb language: - en inference: false license: apache-2.0 --- # ✨ Falcon-40B-Instruct **Falcon-40B-Instruct is a 40B-parameter causal decoder-only model built by [TII](https://www.tii.ae) based on [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b) and finetuned on a mixture of [Baize](https://github.com/project-baize/baize-chatbot) data. It is made available under the Apache 2.0 license.** *Paper coming soon 😊.* 🤗 To get started with Falcon (inference, finetuning, quantization, etc.), we recommend reading [this great blog post from HF](https://huggingface.co/blog/falcon)! ## Why use Falcon-40B-Instruct? * **You are looking for a ready-to-use chat/instruct model based on [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b).** * **Falcon-40B is the best open-source model available.** It outperforms [LLaMA](https://github.com/facebookresearch/llama), [StableLM](https://github.com/Stability-AI/StableLM), [RedPajama](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1), [MPT](https://huggingface.co/mosaicml/mpt-7b), etc. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). * **It features an architecture optimized for inference**, with FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)) and multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)). 💬 **This is an instruct model, which may not be ideal for further finetuning.** If you are interested in building your own instruct/chat model, we recommend starting from [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b). 💸 **Looking for a smaller, less expensive model?** [Falcon-7B-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct) is Falcon-40B-Instruct's little brother! ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-40b-instruct" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Girafatron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` For fast inference with Falcon, check out [Text Generation Inference](https://github.com/huggingface/text-generation-inference)! Read more in this [blog post](https://huggingface.co/blog/falcon). You will need **at least 85-100GB of memory** to swiftly run inference with Falcon-40B (a quantized-loading sketch appears at the end of this card). # Model Card for Falcon-40B-Instruct ## Model Details ### Model Description - **Developed by:** [https://www.tii.ae](https://www.tii.ae); - **Model type:** Causal decoder-only; - **Language(s) (NLP):** English and French; - **License:** Apache 2.0; - **Finetuned from model:** [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b). ### Model Source - **Paper:** *coming soon*. ## Uses ### Direct Use Falcon-40B-Instruct has been finetuned on a chat dataset. ### Out-of-Scope Use Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful.
## Bias, Risks, and Limitations Falcon-40B-Instruct is mostly trained on English data, and will not generalize appropriately to other languages. Furthermore, as it is trained on large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online. ### Recommendations We recommend that users of Falcon-40B-Instruct develop guardrails and take appropriate precautions for any production use. ## How to Get Started with the Model ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-40b-instruct" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Girafatron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## Training Details ### Training Data Falcon-40B-Instruct was finetuned on 150M tokens from [Baize](https://github.com/project-baize/baize-chatbot) mixed with 5% of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) data. The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) tokenizer. ## Evaluation *Paper coming soon.* See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for early results. ## Technical Specifications For more information about pretraining, see [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b). ### Model Architecture and Objective Falcon-40B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token). The architecture is broadly adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), with the following differences: * **Positional embeddings:** rotary ([Su et al., 2021](https://arxiv.org/abs/2104.09864)); * **Attention:** multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)); * **Decoder-block:** parallel attention/MLP with a single layer norm. For multiquery, we are using an internal variant which uses independent keys and values per tensor parallel degree. | **Hyperparameter** | **Value** | **Comment** | |--------------------|-----------|----------------------------------------| | Layers | 60 | | | `d_model` | 8192 | | | `head_dim` | 64 | Reduced to optimise for FlashAttention | | Vocabulary | 65024 | | | Sequence length | 2048 | | ### Compute Infrastructure #### Hardware Falcon-40B-Instruct was trained on AWS SageMaker, on 64 A100 40GB GPUs in P4d instances. #### Software Falcon-40B-Instruct was trained with a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.). ## Citation *Paper coming soon* 😊.
In the meantime, you can use the following information to cite: ``` @article{falcon40b, title={{Falcon-40B}: an open large language model with state-of-the-art performance}, author={Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme}, year={2023} } ``` To learn more about the pretraining dataset, see the 📓 [RefinedWeb paper](https://arxiv.org/abs/2306.01116). ``` @article{refinedweb, title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only}, author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay}, journal={arXiv preprint arXiv:2306.01116}, eprint={2306.01116}, eprinttype = {arXiv}, url={https://arxiv.org/abs/2306.01116}, year={2023} } ``` To cite the [Baize](https://github.com/project-baize/baize-chatbot) instruction dataset used for this model: ``` @article{xu2023baize, title={Baize: An Open-Source Chat Model with Parameter-Efficient Tuning on Self-Chat Data}, author={Xu, Canwen and Guo, Daya and Duan, Nan and McAuley, Julian}, journal={arXiv preprint arXiv:2304.01196}, year={2023} } ``` ## License Falcon-40B-Instruct is made available under the Apache 2.0 license. ## Contact falconllm@tii.ae
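As referenced in the memory note above, Falcon-40B-Instruct needs roughly 85-100GB of memory in bfloat16. The following is a minimal, hedged sketch of 4-bit loading that cuts weight memory to roughly a quarter; it assumes the `bitsandbytes` package is installed and that your transformers version supports `BitsAndBytesConfig`, and the exact memory savings and accuracy impact are not guaranteed.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "tiiuae/falcon-40b-instruct"

# NF4 4-bit quantization: weights occupy ~0.5 bytes/parameter instead of 2 (bf16)
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quant_config,
    device_map="auto",  # spread layers across available GPUs
    trust_remote_code=True,
)

inputs = tokenizer("Daniel: Hello, Girafatron!\nGirafatron:", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=60, do_sample=True, top_k=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```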
mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC
mlc-ai
"2024-09-18T21:10:07Z"
83,002
0
mlc-llm
[ "mlc-llm", "web-llm", "base_model:Qwen/Qwen2.5-0.5B-Instruct", "base_model:quantized:Qwen/Qwen2.5-0.5B-Instruct", "region:us" ]
null
"2024-09-18T19:12:17Z"
--- library_name: mlc-llm base_model: Qwen/Qwen2.5-0.5B-Instruct tags: - mlc-llm - web-llm --- # Qwen2.5-0.5B-Instruct-q4f16_1-MLC This is the [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) model in MLC format `q4f16_1`. The model can be used in the projects [MLC-LLM](https://github.com/mlc-ai/mlc-llm) and [WebLLM](https://github.com/mlc-ai/web-llm). ## Example Usage Here are some examples of using this model in MLC LLM. Before running the examples, please install MLC LLM by following the [installation documentation](https://llm.mlc.ai/docs/install/mlc_llm.html#install-mlc-packages). ### Chat In the command line, run ```bash mlc_llm chat HF://mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC ``` ### REST Server In the command line, run ```bash mlc_llm serve HF://mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC ``` ### Python API ```python from mlc_llm import MLCEngine # Create engine model = "HF://mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC" engine = MLCEngine(model) # Run chat completion with the OpenAI-style API. for response in engine.chat.completions.create( messages=[{"role": "user", "content": "What is the meaning of life?"}], model=model, stream=True, ): for choice in response.choices: print(choice.delta.content, end="", flush=True) print("\n") engine.terminate() ``` ## Documentation For more information on the MLC LLM project, please visit our [documentation](https://llm.mlc.ai/docs/) and [GitHub repo](http://github.com/mlc-ai/mlc-llm).
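As a complement to the REST Server section above, here is a hedged sketch of calling the server from Python; it assumes the server was started with `mlc_llm serve` on its default local host and port and that the endpoint follows the OpenAI chat-completions schema, so adjust the URL if your setup differs.

```python
import requests

# Assumes `mlc_llm serve HF://mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC` is running locally
resp = requests.post(
    "http://127.0.0.1:8000/v1/chat/completions",  # default host/port assumed
    json={
        "model": "HF://mlc-ai/Qwen2.5-0.5B-Instruct-q4f16_1-MLC",
        "messages": [{"role": "user", "content": "What is the meaning of life?"}],
    },
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```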
dandelin/vilt-b32-finetuned-vqa
dandelin
"2022-08-02T13:03:04Z"
82,904
384
transformers
[ "transformers", "pytorch", "vilt", "visual-question-answering", "arxiv:2102.03334", "license:apache-2.0", "endpoints_compatible", "region:us" ]
visual-question-answering
"2022-03-02T23:29:05Z"
--- tags: - visual-question-answering license: apache-2.0 widget: - text: "What's the animal doing?" src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg" - text: "What is on top of the building?" src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg" --- # Vision-and-Language Transformer (ViLT), fine-tuned on VQAv2 Vision-and-Language Transformer (ViLT) model fine-tuned on [VQAv2](https://visualqa.org/). It was introduced in the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Kim et al. and first released in [this repository](https://github.com/dandelin/ViLT). Disclaimer: The team releasing ViLT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Intended uses & limitations You can use the raw model for visual question answering. ### How to use Here is how to use this model in PyTorch: ```python from transformers import ViltProcessor, ViltForQuestionAnswering import requests from PIL import Image # prepare image + question url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) text = "How many cats are there?" processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa") # prepare inputs encoding = processor(image, text, return_tensors="pt") # forward pass outputs = model(**encoding) logits = outputs.logits idx = logits.argmax(-1).item() print("Predicted answer:", model.config.id2label[idx]) ``` ## Training data (to do) ## Training procedure ### Preprocessing (to do) ### Pretraining (to do) ## Evaluation results (to do) ### BibTeX entry and citation info ```bibtex @misc{kim2021vilt, title={ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision}, author={Wonjae Kim and Bokyung Son and Ildoo Kim}, year={2021}, eprint={2102.03334}, archivePrefix={arXiv}, primaryClass={stat.ML} } ```
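As a shorter alternative to the processor/model example above, here is a hedged sketch using the transformers `pipeline` API; it assumes a transformers version that ships the `visual-question-answering` pipeline, for which this checkpoint is a common default.

```python
from transformers import pipeline

# The pipeline downloads the image from the URL and handles preprocessing
vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")

result = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=1,
)
print(result)  # e.g. [{'score': ..., 'answer': '2'}]
```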
TheBloke/Llama-2-7B-Chat-GPTQ
TheBloke
"2023-09-27T12:44:48Z"
82,750
259
transformers
[ "transformers", "safetensors", "llama", "text-generation", "facebook", "meta", "pytorch", "llama-2", "en", "arxiv:2307.09288", "base_model:meta-llama/Llama-2-7b-chat-hf", "base_model:quantized:meta-llama/Llama-2-7b-chat-hf", "license:llama2", "autotrain_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
text-generation
"2023-07-18T17:38:15Z"
--- language: - en license: llama2 tags: - facebook - meta - pytorch - llama - llama-2 model_name: Llama 2 7B Chat arxiv: 2307.09288 base_model: meta-llama/Llama-2-7b-chat-hf inference: false model_creator: Meta Llama 2 model_type: llama pipeline_tag: text-generation prompt_template: '[INST] <<SYS>> You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don''t know the answer to a question, please don''t share false information. <</SYS>> {prompt}[/INST] ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Llama 2 7B Chat - GPTQ - Model creator: [Meta Llama 2](https://huggingface.co/meta-llama) - Original model: [Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) <!-- description start --> ## Description This repo contains GPTQ model files for [Meta Llama 2's Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Llama-2-7b-Chat-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF) * [Meta Llama 2's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Llama-2-Chat ``` [INST] <<SYS>> You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. 
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <</SYS>> {prompt}[/INST] ``` <!-- prompt-template end --> <!-- README_GPTQ.md-provided-files start --> ## Provided files and GPTQ parameters Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. All recent GPTQ files are made with AutoGPTQ, and all files in non-main branches are made with AutoGPTQ. Files in the `main` branch which were uploaded before August 2023 were made with GPTQ-for-LLaMa. <details> <summary>Explanation of GPTQ parameters</summary> - Bits: The bit size of the quantised model. - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. "None" is the lowest possible value. - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now. - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy. - GPTQ dataset: The dataset used for quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s). - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences. - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama models in 4-bit. </details> | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc | | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- | | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 4.02 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. | | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 4.28 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | | [gptq-4bit-128g-actorder_True](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ/tree/gptq-4bit-128g-actorder_True) | 4 | 128 | Yes | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 3.90 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. 
| | [main](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ/tree/main) | 4 | 128 | No | 0.01 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 3.90 GB | Yes | 4-bit, without Act Order and group size 128g. | <!-- README_GPTQ.md-provided-files end --> <!-- README_GPTQ.md-download-from-branches start --> ## How to download from branches - In text-generation-webui, you can add `:branch` to the end of the download name, eg `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-64g-actorder_True` - With Git, you can clone a branch with: ``` git clone --single-branch --branch gptq-4bit-64g-actorder_True https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ ``` - In Python Transformers code, the branch is the `revision` parameter; see below. <!-- README_GPTQ.md-download-from-branches end --> <!-- README_GPTQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui). Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/Llama-2-7b-Chat-GPTQ`. - To download from a specific branch, enter for example `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-64g-actorder_True` - see Provided Files above for the list of branches for each option. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `Llama-2-7b-Chat-GPTQ` 7. The model will automatically load, and is now ready for use! 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. * Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`. 9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started! <!-- README_GPTQ.md-text-generation-webui end --> <!-- README_GPTQ.md-use-from-python start --> ## How to use this GPTQ model from Python code ### Install the necessary packages Requires: Transformers 4.32.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later. ```shell pip3 install "transformers>=4.32.0" "optimum>=1.12.0" pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7 ``` (The version specifiers are quoted so the shell does not treat `>=` as output redirection.) If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead: ```shell pip3 uninstall -y auto-gptq git clone https://github.com/PanQiWei/AutoGPTQ cd AutoGPTQ pip3 install . ``` ### For CodeLlama models only: you must use Transformers 4.33.0 or later.
If 4.33.0 is not yet released when you read this, you will need to install Transformers from source: ```shell pip3 uninstall -y transformers pip3 install git+https://github.com/huggingface/transformers.git ``` ### You can then use the following code ```python from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_name_or_path = "TheBloke/Llama-2-7b-Chat-GPTQ" # To use a different branch, change revision # For example: revision="gptq-4bit-64g-actorder_True" model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto", trust_remote_code=False, revision="main") tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True) prompt = "Tell me about AI" prompt_template=f'''[INST] <<SYS>> You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <</SYS>> {prompt}[/INST] ''' print("\n\n*** Generate:") input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda() output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512) print(tokenizer.decode(output[0])) # Inference can also be done using transformers' pipeline print("*** Pipeline:") pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1 ) print(pipe(prompt_template)[0]['generated_text']) ``` <!-- README_GPTQ.md-use-from-python end --> <!-- README_GPTQ.md-compatibility start --> ## Compatibility The files provided are tested to work with AutoGPTQ, both via Transformers and using AutoGPTQ directly. They should also work with [Occ4m's GPTQ-for-LLaMa fork](https://github.com/0cc4m/KoboldAI). [ExLlama](https://github.com/turboderp/exllama) is compatible with Llama models in 4-bit. Please see the Provided Files table above for per-file compatibility. [Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is compatible with all GPTQ models. <!-- README_GPTQ.md-compatibility end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute, it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz.
**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> # Original model card: Meta Llama 2's Llama 2 7B Chat # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. 
||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models (70B) use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](https://arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespace and line breaks in between (we recommend calling `strip()` on inputs to avoid double spaces); a minimal prompt-builder sketch appears at the end of this card. See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta's sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2's potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide/) ## Reporting Issues Please report any software "bug," or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat-hf)|
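To make the chat formatting requirements above concrete, here is a minimal, hedged sketch of a single-turn Llama-2-Chat prompt builder matching the template shown earlier in this card; the `<<SYS>>` block and `[INST]` tags follow the card's template, while the function and variable names are illustrative, and the tokenizer is expected to add the `BOS` token itself.

```python
DEFAULT_SYSTEM_PROMPT = (
    "You are a helpful, respectful and honest assistant. "
    "Always answer as helpfully as possible, while being safe."
)

def build_llama2_chat_prompt(user_message: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT) -> str:
    """Assemble a single-turn Llama-2-Chat prompt with <<SYS>> and [INST] tags."""
    # strip() avoids the stray whitespace / double spaces the card warns about
    return (
        f"[INST] <<SYS>>\n{system_prompt.strip()}\n<</SYS>>\n\n"
        f"{user_message.strip()} [/INST]"
    )

print(build_llama2_chat_prompt("Tell me about AI"))
```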
THUDM/glm-4v-9b
THUDM
"2024-08-12T07:42:56Z"
82,456
215
transformers
[ "transformers", "safetensors", "chatglm", "glm", "thudm", "custom_code", "zh", "en", "arxiv:2406.12793", "arxiv:2311.03079", "license:other", "region:us" ]
null
"2024-06-04T08:58:46Z"
--- license: other license_name: glm-4 license_link: https://huggingface.co/THUDM/glm-4v-9b/blob/main/LICENSE language: - zh - en tags: - glm - chatglm - thudm inference: false --- # GLM-4V-9B Read this in [English](README_en.md) **2024/08/12: the code in this repository has been updated to use `transformers>=4.44.0`; please update your dependencies accordingly.** GLM-4V-9B is the open-source multimodal version of the GLM-4 series, the latest generation of pretrained models released by Zhipu AI. **GLM-4V-9B** supports multi-turn Chinese-English dialogue at a high resolution of 1120 * 1120. In multimodal evaluations covering comprehensive Chinese and English ability, perception and reasoning, text recognition, and chart understanding, GLM-4V-9B outperforms GPT-4-turbo-2024-04-09, Gemini 1.0 Pro, Qwen-VL-Max, and Claude 3 Opus. ### Multimodal Capabilities GLM-4V-9B is a multimodal language model with visual understanding; its evaluation results on classic benchmark tasks are as follows: | | **MMBench-EN-Test** | **MMBench-CN-Test** | **SEEDBench_IMG** | **MMStar** | **MMMU** | **MME** | **HallusionBench** | **AI2D** | **OCRBench** | |-------------------------|---------------------|---------------------|-------------------|------------|----------|---------|--------------------|----------|--------------| | | English overall | Chinese overall | Overall ability | Overall ability | Multi-discipline | Perception & reasoning | Hallucination | Chart understanding | Text recognition | | **GPT-4o, 20240513** | 83.4 | 82.1 | 77.1 | 63.9 | 69.2 | 2310.3 | 55 | 84.6 | 736 | | **GPT-4v, 20240409** | 81 | 80.2 | 73 | 56 | 61.7 | 2070.2 | 43.9 | 78.6 | 656 | | **GPT-4v, 20231106** | 77 | 74.4 | 72.3 | 49.7 | 53.8 | 1771.5 | 46.5 | 75.9 | 516 | | **InternVL-Chat-V1.5** | 82.3 | 80.7 | 75.2 | 57.1 | 46.8 | 2189.6 | 47.4 | 80.6 | 720 | | **LlaVA-Next-Yi-34B** | 81.1 | 79 | 75.7 | 51.6 | 48.8 | 2050.2 | 34.8 | 78.9 | 574 | | **Step-1V** | 80.7 | 79.9 | 70.3 | 50 | 49.9 | 2206.4 | 48.4 | 79.2 | 625 | | **MiniCPM-Llama3-V2.5** | 77.6 | 73.8 | 72.3 | 51.8 | 45.8 | 2024.6 | 42.4 | 78.4 | 725 | | **Qwen-VL-Max** | 77.6 | 75.7 | 72.7 | 49.5 | 52 | 2281.7 | 41.2 | 75.7 | 684 | | **GeminiProVision** | 73.6 | 74.3 | 70.7 | 38.6 | 49 | 2148.9 | 45.7 | 72.9 | 680 | | **Claude-3V Opus** | 63.3 | 59.2 | 64 | 45.7 | 54.9 | 1586.8 | 37.8 | 70.6 | 694 | | **GLM-4v-9B** | 81.1 | 79.4 | 76.8 | 58.7 | 47.2 | 2163.8 | 46.6 | 81.1 | 786 | **This repository hosts the GLM-4V-9B model, which supports an `8K` context length.** ## Running the Model **For more inference code and dependency information, please visit our [GitHub](https://github.com/THUDM/GLM-4).** **Please install the dependencies strictly according to the [requirements](https://github.com/THUDM/GLM-4/blob/main/basic_demo/requirements.txt); otherwise, the model will not run correctly.** ```python import torch from PIL import Image from transformers import AutoModelForCausalLM, AutoTokenizer device = "cuda" tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4v-9b", trust_remote_code=True) query = '描述这张图片' image = Image.open("your image").convert('RGB') inputs = tokenizer.apply_chat_template([{"role": "user", "image": image, "content": query}], add_generation_prompt=True, tokenize=True, return_tensors="pt", return_dict=True) # chat mode inputs = inputs.to(device) model = AutoModelForCausalLM.from_pretrained( "THUDM/glm-4v-9b", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, trust_remote_code=True ).to(device).eval() gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1} with torch.no_grad(): outputs = model.generate(**inputs, **gen_kwargs) outputs = outputs[:, inputs['input_ids'].shape[1]:] print(tokenizer.decode(outputs[0])) ``` ## License The use of the GLM-4 model weights must follow the [LICENSE](LICENSE). ## Citation If you find our work helpful, please consider citing the following papers. ``` @misc{glm2024chatglm, title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools}, author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai
Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang}, year={2024}, eprint={2406.12793}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ``` @misc{wang2023cogvlm, title={CogVLM: Visual Expert for Pretrained Language Models}, author={Weihan Wang and Qingsong Lv and Wenmeng Yu and Wenyi Hong and Ji Qi and Yan Wang and Junhui Ji and Zhuoyi Yang and Lei Zhao and Xixuan Song and Jiazheng Xu and Bin Xu and Juanzi Li and Yuxiao Dong and Ming Ding and Jie Tang}, year={2023}, eprint={2311.03079}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
echarlaix/tiny-random-stable-diffusion-xl
echarlaix
"2023-07-06T16:08:37Z"
82,035
1
diffusers
[ "diffusers", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2023-07-06T16:06:52Z"
--- license: apache-2.0 ---
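The card above carries only license metadata; given the `diffusers:StableDiffusionXLPipeline` tag, a hedged usage sketch follows. A tiny randomly-initialized checkpoint like this is typically used for fast pipeline smoke tests (e.g., in CI), not for producing meaningful images.

```python
from diffusers import StableDiffusionXLPipeline

# Tiny random weights: loads quickly and exercises the full SDXL code path
pipe = StableDiffusionXLPipeline.from_pretrained("echarlaix/tiny-random-stable-diffusion-xl")

# Two denoising steps are enough for a smoke test; the output is noise-like
image = pipe(prompt="a test prompt", num_inference_steps=2).images[0]
image.save("test.png")
```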