| column | dtype | range / cardinality |
| :--- | :--- | :--- |
| modelId | string | lengths 5–122 |
| author | string | lengths 2–42 |
| last_modified | unknown | |
| downloads | int64 | 0–75.3M |
| likes | int64 | 0–10.6k |
| library_name | string | 189 classes |
| tags | sequence | lengths 1–1.84k |
| pipeline_tag | string | 48 classes |
| createdAt | unknown | |
| card | string | lengths 1–901k |
speechbrain/lang-id-voxlingua107-ecapa
speechbrain
"2024-02-25T23:48:07Z"
118,301
64
speechbrain
[ "speechbrain", "audio-classification", "embeddings", "Language", "Identification", "pytorch", "ECAPA-TDNN", "TDNN", "VoxLingua107", "multilingual", "ab", "af", "am", "ar", "as", "az", "ba", "be", "bg", "bi", "bo", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fo", "fr", "gl", "gn", "gu", "gv", "ha", "haw", "hi", "hr", "ht", "hu", "hy", "ia", "id", "is", "it", "he", "ja", "jv", "ka", "kk", "km", "kn", "ko", "la", "lm", "ln", "lo", "lt", "lv", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne", "nl", "nn", "no", "oc", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sco", "sd", "si", "sk", "sl", "sn", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "tg", "th", "tk", "tl", "tr", "tt", "uk", "ud", "uz", "vi", "war", "yi", "yo", "zh", "dataset:VoxLingua107", "arxiv:2106.04624", "license:apache-2.0", "has_space", "region:us" ]
audio-classification
"2022-03-02T23:29:05Z"
---
language: [multilingual, ab, af, am, ar, as, az, ba, be, bg, bi, bo, br, bs, ca, ceb, cs, cy, da, de, el, en, eo, es, et, eu, fa, fi, fo, fr, gl, gn, gu, gv, ha, haw, hi, hr, ht, hu, hy, ia, id, is, it, he, ja, jv, ka, kk, km, kn, ko, la, lm, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, "no", oc, pa, pl, ps, pt, ro, ru, sa, sco, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ud, uz, vi, war, yi, yo, zh]
thumbnail:
tags: [audio-classification, speechbrain, embeddings, Language, Identification, pytorch, ECAPA-TDNN, TDNN, VoxLingua107]
license: "apache-2.0"
datasets:
  - VoxLingua107
metrics:
  - Accuracy
widget:
  - example_title: English Sample
    src: https://cdn-media.huggingface.co/speech_samples/LibriSpeech_61-70968-0000.flac
---

# VoxLingua107 ECAPA-TDNN Spoken Language Identification Model

## Model description

This is a spoken language recognition model trained on the VoxLingua107 dataset using SpeechBrain. The model uses the ECAPA-TDNN architecture that has previously been used for speaker recognition. However, it uses more fully connected hidden layers after the embedding layer, and cross-entropy loss was used for training. We observed that this improved the performance of extracted utterance embeddings for downstream tasks.

The system is trained with recordings sampled at 16kHz (single channel). The code will automatically normalize your audio (i.e., resampling + mono channel selection) when calling *classify_file* if needed.

The model can classify a speech utterance according to the language spoken. It covers 107 different languages (Abkhazian, Afrikaans, Amharic, Arabic, Assamese, Azerbaijani, Bashkir, Belarusian, Bulgarian, Bengali, Tibetan, Breton, Bosnian, Catalan, Cebuano, Czech, Welsh, Danish, German, Greek, English, Esperanto, Spanish, Estonian, Basque, Persian, Finnish, Faroese, French, Galician, Guarani, Gujarati, Manx, Hausa, Hawaiian, Hindi, Croatian, Haitian, Hungarian, Armenian, Interlingua, Indonesian, Icelandic, Italian, Hebrew, Japanese, Javanese, Georgian, Kazakh, Central Khmer, Kannada, Korean, Latin, Luxembourgish, Lingala, Lao, Lithuanian, Latvian, Malagasy, Maori, Macedonian, Malayalam, Mongolian, Marathi, Malay, Maltese, Burmese, Nepali, Dutch, Norwegian Nynorsk, Norwegian, Occitan, Panjabi, Polish, Pushto, Portuguese, Romanian, Russian, Sanskrit, Scots, Sindhi, Sinhala, Slovak, Slovenian, Shona, Somali, Albanian, Serbian, Sundanese, Swedish, Swahili, Tamil, Telugu, Tajik, Thai, Turkmen, Tagalog, Turkish, Tatar, Ukrainian, Urdu, Uzbek, Vietnamese, Waray, Yiddish, Yoruba, Mandarin Chinese).

## Intended uses & limitations

The model has two uses:

- use 'as is' for spoken language recognition
- use as an utterance-level feature (embedding) extractor, for creating a dedicated language ID model on your own data (see the sketch below)

The model is trained on automatically collected YouTube data. For more information about the dataset, see [here](http://bark.phon.ioc.ee/voxlingua107/).
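To make the second use case concrete, here is a hedged sketch of training a lightweight downstream language-ID classifier on the 256-dimensional utterance embeddings. The file paths and labels are placeholders, and the downstream classifier choice (scikit-learn logistic regression) is illustrative; SpeechBrain installation is covered in the next section.

```python
import torch
from speechbrain.inference.classifiers import EncoderClassifier
from sklearn.linear_model import LogisticRegression

language_id = EncoderClassifier.from_hparams(source="speechbrain/lang-id-voxlingua107-ecapa", savedir="tmp")

# Placeholder file lists and integer labels for your own language-ID task
files = ["own_data/clip1.wav", "own_data/clip2.wav"]
labels = [0, 1]

embeddings = []
for f in files:
    signal = language_id.load_audio(f)
    emb = language_id.encode_batch(signal)          # shape: (1, 1, 256)
    embeddings.append(emb.squeeze().detach().cpu().numpy())

# Fit any lightweight classifier on the extracted embeddings
clf = LogisticRegression(max_iter=1000).fit(embeddings, labels)
```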
#### How to use

```bash
pip install git+https://github.com/speechbrain/speechbrain.git@develop
```

```python
import torchaudio
from speechbrain.inference.classifiers import EncoderClassifier

language_id = EncoderClassifier.from_hparams(source="speechbrain/lang-id-voxlingua107-ecapa", savedir="tmp")

# Download a Thai language sample from Omniglot and convert it to a suitable form
signal = language_id.load_audio("speechbrain/lang-id-voxlingua107-ecapa/udhr_th.wav")
prediction = language_id.classify_batch(signal)
print(prediction)
# (tensor([[-2.8646e+01, -3.0346e+01, -2.0748e+01, -2.9562e+01, -2.2187e+01,
#          -3.2668e+01, -3.6677e+01, -3.3573e+01, -3.2545e+01, -2.4365e+01,
#          -2.4688e+01, -3.1171e+01, -2.7743e+01, -2.9918e+01, -2.4770e+01,
#          -3.2250e+01, -2.4727e+01, -2.6087e+01, -2.1870e+01, -3.2821e+01,
#          -2.2128e+01, -2.2822e+01, -3.0888e+01, -3.3564e+01, -2.9906e+01,
#          -2.2392e+01, -2.5573e+01, -2.6443e+01, -3.2429e+01, -3.2652e+01,
#          -3.0030e+01, -2.4607e+01, -2.2967e+01, -2.4396e+01, -2.8578e+01,
#          -2.5153e+01, -2.8475e+01, -2.6409e+01, -2.5230e+01, -2.7957e+01,
#          -2.6298e+01, -2.3609e+01, -2.5863e+01, -2.8225e+01, -2.7225e+01,
#          -3.0486e+01, -2.1185e+01, -2.7938e+01, -3.3155e+01, -1.9076e+01,
#          -2.9181e+01, -2.2160e+01, -1.8352e+01, -2.5866e+01, -3.3636e+01,
#          -4.2016e+00, -3.1581e+01, -3.1894e+01, -2.7834e+01, -2.5429e+01,
#          -3.2235e+01, -3.2280e+01, -2.8786e+01, -2.3366e+01, -2.6047e+01,
#          -2.2075e+01, -2.3770e+01, -2.2518e+01, -2.8101e+01, -2.5745e+01,
#          -2.6441e+01, -2.9822e+01, -2.7109e+01, -3.0225e+01, -2.4566e+01,
#          -2.9268e+01, -2.7651e+01, -3.4221e+01, -2.9026e+01, -2.6009e+01,
#          -3.1968e+01, -3.1747e+01, -2.8156e+01, -2.9025e+01, -2.7756e+01,
#          -2.8052e+01, -2.9341e+01, -2.8806e+01, -2.1636e+01, -2.3992e+01,
#          -2.3794e+01, -3.3743e+01, -2.8332e+01, -2.7465e+01, -1.5085e-02,
#          -2.9094e+01, -2.1444e+01, -2.9780e+01, -3.6046e+01, -3.7401e+01,
#          -3.0888e+01, -3.3172e+01, -1.8931e+01, -2.2679e+01, -3.0225e+01,
#          -2.4995e+01, -2.1028e+01]]), tensor([-0.0151]), tensor([94]), ['th'])
# The scores in the prediction[0] tensor can be interpreted as log-likelihoods that
# the given utterance belongs to the given language (i.e., the larger the better)
# The linear-scale likelihood can be retrieved using the following:
print(prediction[1].exp())
# tensor([0.9850])

# The identified language ISO code is given in prediction[3]
print(prediction[3])
# ['th: Thai']

# Alternatively, use the utterance embedding extractor:
emb = language_id.encode_batch(signal)
print(emb.shape)
# torch.Size([1, 1, 256])
```

To perform inference on the GPU, add `run_opts={"device":"cuda"}` when calling the `from_hparams` method.

The system is trained with recordings sampled at 16kHz (single channel). The code will automatically normalize your audio (i.e., resampling + mono channel selection) when calling *classify_file* if needed. Make sure your input tensor is compliant with the expected sampling rate if you use *encode_batch* and *classify_batch*.

#### Limitations and bias

Since the model is trained on VoxLingua107, it has many limitations and biases, some of which are:

- Its accuracy on smaller languages is probably quite limited
- It probably works worse on female speech than on male speech (because YouTube data includes much more male speech)
- Based on subjective experiments, it doesn't work well on speech with a foreign accent
- It probably doesn't work well on children's speech or on persons with speech disorders

## Training data

The model is trained on [VoxLingua107](http://bark.phon.ioc.ee/voxlingua107/).
VoxLingua107 is a speech dataset for training spoken language identification models. The dataset consists of short speech segments automatically extracted from YouTube videos and labeled according to the language of the video title and description, with some post-processing steps to filter out false positives.

VoxLingua107 contains data for 107 languages. The total amount of speech in the training set is 6628 hours. The average amount of data per language is 62 hours. However, the real amount per language varies a lot. There is also a separate development set containing 1609 speech segments from 33 languages, validated by at least two volunteers to really contain the given language.

## Training procedure

See the [SpeechBrain recipe](https://github.com/speechbrain/speechbrain/tree/voxlingua107/recipes/VoxLingua107/lang_id).

## Evaluation results

Error rate: 6.7% on the VoxLingua107 development dataset

#### Referencing SpeechBrain

```bibtex
@misc{speechbrain,
  title={{SpeechBrain}: A General-Purpose Speech Toolkit},
  author={Mirco Ravanelli and Titouan Parcollet and Peter Plantinga and Aku Rouhe and Samuele Cornell and Loren Lugosch and Cem Subakan and Nauman Dawalatabad and Abdelwahab Heba and Jianyuan Zhong and Ju-Chieh Chou and Sung-Lin Yeh and Szu-Wei Fu and Chien-Feng Liao and Elena Rastorgueva and François Grondin and William Aris and Hwidong Na and Yan Gao and Renato De Mori and Yoshua Bengio},
  year={2021},
  eprint={2106.04624},
  archivePrefix={arXiv},
  primaryClass={eess.AS},
  note={arXiv:2106.04624}
}
```

### Referencing VoxLingua107

```bibtex
@inproceedings{valk2021slt,
  title={{VoxLingua107}: a Dataset for Spoken Language Recognition},
  author={J{\"o}rgen Valk and Tanel Alum{\"a}e},
  booktitle={Proc. IEEE SLT Workshop},
  year={2021}
}
```

#### About SpeechBrain

SpeechBrain is an open-source, all-in-one speech toolkit. It is designed to be simple, extremely flexible, and user-friendly. Competitive or state-of-the-art performance is obtained in various domains.

Website: https://speechbrain.github.io/
GitHub: https://github.com/speechbrain/speechbrain
Minej/bert-base-personality
Minej
"2023-07-13T13:11:50Z"
118,034
20
transformers
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "en", "arxiv:1810.04805", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2023-06-06T19:17:08Z"
---
license: mit
language:
  - en
library_name: transformers
pipeline_tag: text-classification
---

## How to Get Started with the Model

To use the model through the Hosted Inference API, follow the code snippet provided below:

```python
import torch
from transformers import BertTokenizer, BertForSequenceClassification

def personality_detection(text):
    tokenizer = BertTokenizer.from_pretrained("Minej/bert-base-personality")
    model = BertForSequenceClassification.from_pretrained("Minej/bert-base-personality")

    inputs = tokenizer(text, truncation=True, padding=True, return_tensors="pt")
    outputs = model(**inputs)
    # Apply a sigmoid so each trait score falls in the 0-1 range described below
    predictions = torch.sigmoid(outputs.logits).squeeze().detach().numpy()

    label_names = ['Extroversion', 'Neuroticism', 'Agreeableness', 'Conscientiousness', 'Openness']
    result = {label_names[i]: predictions[i] for i in range(len(label_names))}
    return result
```

#### Result Format

The personality_detection function returns a dictionary containing the predicted personality traits based on the given input text. The dictionary contains the following personality traits with their corresponding predicted values:

- Extroversion: A value between 0 and 1 representing the predicted extroversion trait.
- Neuroticism: A value between 0 and 1 representing the predicted neuroticism trait.
- Agreeableness: A value between 0 and 1 representing the predicted agreeableness trait.
- Conscientiousness: A value between 0 and 1 representing the predicted conscientiousness trait.
- Openness: A value between 0 and 1 representing the predicted openness trait.

```python
text_input = "I am feeling excited about the upcoming event."
personality_prediction = personality_detection(text_input)

print(personality_prediction)
```

###### Output:

```python
{
    "Extroversion": 0.535,
    "Neuroticism": 0.576,
    "Agreeableness": 0.399,
    "Conscientiousness": 0.253,
    "Openness": 0.563
}
```

Note: The values in the example output are just placeholders and may not reflect the actual predictions. You can modify the example code and the result format to match your specific use case and desired output format.

### Model Description

Transfer Learning for Big Five Personality Prediction

In machine learning, training accurate models can be challenging when labeled data is limited. Transfer learning offers a solution by leveraging pre-existing labeled data from a similar task or domain. By transferring knowledge learned from one task to another, we can overcome data scarcity and train more effective models.

In this project, we used transfer learning with the BERT BASE UNCASED model to predict Big Five personality traits. The model was fine-tuned on a curated dataset for personality traits, learning patterns between input text and personality characteristics. By applying transfer learning, we improved the accuracy of personality trait predictions.

By leveraging transfer learning and fine-tuning BERT BASE UNCASED, we accurately predict an individual's Big Five personality traits based on their input text. This approach addresses the challenges of limited labeled data in personality prediction, providing insights into individuals' personalities.

This project showcases the power of transfer learning in machine learning and highlights the effectiveness of BERT BASE UNCASED for predicting Big Five personality traits.
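The fine-tuning code itself is not published in this card. Purely as a hedged illustration of the transfer-learning setup described above, a five-trait multi-label fine-tune of bert-base-uncased could be wired up as follows; the example texts, trait targets, and hyperparameters are placeholders, not the actual training configuration.

```python
import torch
from transformers import BertForSequenceClassification, BertTokenizer

# Multi-label head: five sigmoid outputs trained with BCEWithLogitsLoss
model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased",
    num_labels=5,
    problem_type="multi_label_classification",
)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

texts = ["I love meeting new people at parties."]        # placeholder data
targets = torch.tensor([[0.8, 0.3, 0.6, 0.5, 0.7]])      # trait targets in [0, 1]

batch = tokenizer(texts, truncation=True, padding=True, return_tensors="pt")
loss = model(**batch, labels=targets).loss
loss.backward()  # plug into your usual optimizer / training loop
```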
- **Model type:** BERT BASE UNCASED - **Language(s) (NLP):** English - **License:** MIT - **Finetuned from model [optional]:** https://huggingface.co/bert-base-uncased ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use The personality prediction model can be used directly by individuals who are interested in gaining insights into their own personality traits based on their input text. Users can input text and receive predictions for the Big Five personality traits. ### Downstream Use This model is not intended for downstream use or fine-tuning for specific tasks. It is designed as a standalone personality prediction model. ### Out-of-Scope Use This model is not suitable for uses beyond personality prediction. It should not be used for making critical decisions or judgments about individuals in areas such as employment, education, or legal matters. ## Bias, Risks, and Limitations The personality prediction model, like any machine learning model, has certain limitations and potential biases that should be taken into account: Limited Context: The model makes predictions based on input text alone and may not capture the full context of an individual's personality. It is important to consider that personality traits are influenced by various factors beyond textual expression. Generalization: The model predicts personality traits based on patterns learned from a specific dataset. Its performance may vary when applied to individuals from different demographic or cultural backgrounds not well represented in the training data. Ethical Considerations: Personality prediction models should be used responsibly, with an understanding that personality traits do not determine a person's worth or abilities. It is important to avoid making unfair judgments or discriminating against individuals based on predicted personality traits. Privacy Concerns: The model relies on user-provided input text, which may contain sensitive or personal information. Users should exercise caution when sharing personal details and ensure the security of their data. False Positives/Negatives: The model's predictions may not always align perfectly with an individual's actual personality traits. It is possible for the model to generate false positives (predicting a trait that is not present) or false negatives (missing a trait that is present). ### Recommendations To mitigate risks and limitations associated with personality prediction models, the following recommendations are suggested: Awareness and Education: Users should be informed about the limitations and potential biases of the model. Promote understanding that personality traits are complex and cannot be fully captured by a single model or text analysis. Avoid Stereotyping and Discrimination: Users should be cautious about making judgments or decisions solely based on predicted personality traits. Personality predictions should not be used to discriminate against individuals or perpetuate stereotypes. Interpret with Context: Interpret the model's predictions in the appropriate context and consider additional information about an individual beyond their input text. Data Privacy and Security: Ensure that user data is handled securely and with respect to privacy regulations. Users should be aware of the information they provide and exercise caution when sharing personal details. 
Promote Ethical Use: Encourage responsible use of personality prediction models and discourage misuse or harmful applications.

It is important to note that the above recommendations are general guidelines, and further context-specific recommendations should be developed based on the particular use case and ethical considerations.

## How to Download the Model

If you would like to download the model files and use them instead of the Hosted Inference API, then you can follow the code snippet provided below:

```python
import torch
from transformers import BertForSequenceClassification, BertTokenizer

# Initialization of the model values
model = BertForSequenceClassification.from_pretrained(".", num_labels=5)
tokenizer = BertTokenizer.from_pretrained(".", do_lower_case=True)

model.config.label2id = {
    "Extroversion": 0,
    "Neuroticism": 1,
    "Agreeableness": 2,
    "Conscientiousness": 3,
    "Openness": 4,
}

model.config.id2label = {
    "0": "Extroversion",
    "1": "Neuroticism",
    "2": "Agreeableness",
    "3": "Conscientiousness",
    "4": "Openness",
}

def personality_detection(model_input: str) -> dict:
    '''
    Performs personality prediction on the given input text

    Args:
        model_input (str): The input text to analyze

    Returns:
        dict: A dictionary mapping each Big Five trait label to its predicted score
    '''
    if len(model_input) == 0:
        return {
            "Extroversion": float(0),
            "Neuroticism": float(0),
            "Agreeableness": float(0),
            "Conscientiousness": float(0),
            "Openness": float(0),
        }
    else:
        # BERT accepts at most 512 tokens, so truncate to that limit
        dict1 = tokenizer.encode_plus(model_input, max_length=512, padding=True, truncation=True)
        dict_custom = {}
        dict_custom['input_ids'] = [dict1['input_ids'], dict1['input_ids']]
        dict_custom['token_type_ids'] = [dict1['token_type_ids'], dict1['token_type_ids']]
        dict_custom['attention_mask'] = [dict1['attention_mask'], dict1['attention_mask']]
        outs = model(torch.tensor(dict_custom['input_ids']),
                     token_type_ids=None,
                     attention_mask=torch.tensor(dict_custom['attention_mask']))
        b_logit_pred = outs[0]
        # Sigmoid maps the logits into the 0-1 trait range
        pred_label = torch.sigmoid(b_logit_pred)
        return {
            "Extroversion": float(pred_label[0][0]),
            "Neuroticism": float(pred_label[0][1]),
            "Agreeableness": float(pred_label[0][2]),
            "Conscientiousness": float(pred_label[0][3]),
            "Openness": float(pred_label[0][4]),
        }

personality_prediction = personality_detection(text_input)
```

Make sure you have the required dependencies installed (transformers and torch). This code snippet initializes the model, tokenizer, and configuration. It then defines the personality_detection function, which takes a text as input and returns a dictionary with the predicted score for each trait.

You can call the personality_detection function with your input text to obtain the personality predictions. The personality_prediction variable will hold the resulting dictionary.

Please note that this code assumes you have already downloaded the necessary model files (config.json, pytorch_model.bin, special_tokens_map.json, tokenizer_config.json, vocab.txt) and placed them in the current directory (indicated by "."). Adjust the paths and filenames accordingly if needed.
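For quick experiments, the same checkpoint can likely also be driven through the high-level `pipeline` API; this is a hedged sketch rather than part of the card's documented interface (`function_to_apply="sigmoid"` maps the logits into the 0-1 trait range used above):

```python
from transformers import pipeline

# Sketch: score all five traits at once via the text-classification pipeline
pipe = pipeline(
    "text-classification",
    model="Minej/bert-base-personality",
    top_k=None,                    # return scores for every label
    function_to_apply="sigmoid",
)
print(pipe("I am feeling excited about the upcoming event."))
```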
## Citation

```bibtex
@article{DBLP:journals/corr/abs-1810-04805,
  author        = {Jacob Devlin and Ming{-}Wei Chang and Kenton Lee and Kristina Toutanova},
  title         = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language Understanding},
  journal       = {CoRR},
  volume        = {abs/1810.04805},
  year          = {2018},
  url           = {http://arxiv.org/abs/1810.04805},
  archivePrefix = {arXiv},
  eprint        = {1810.04805},
  timestamp     = {Tue, 30 Oct 2018 20:39:56 +0100},
  biburl        = {https://dblp.org/rec/journals/corr/abs-1810-04805.bib},
  bibsource     = {dblp computer science bibliography, https://dblp.org}
}
```

## More Information

TBA
jonathandinu/face-parsing
jonathandinu
"2024-01-29T16:18:34Z"
117,776
71
transformers
[ "transformers", "pytorch", "onnx", "safetensors", "segformer", "vision", "image-segmentation", "nvidia/mit-b5", "transformers.js", "en", "dataset:celebamaskhq", "arxiv:2105.15203", "endpoints_compatible", "has_space", "region:us" ]
image-segmentation
"2022-07-06T01:22:42Z"
---
language: en
library_name: transformers
tags:
  - vision
  - image-segmentation
  - nvidia/mit-b5
  - transformers.js
  - onnx
datasets:
  - celebamaskhq
---

# Face Parsing

![example image and output](demo.png)

[Semantic segmentation](https://huggingface.co/docs/transformers/tasks/semantic_segmentation) model fine-tuned from [nvidia/mit-b5](https://huggingface.co/nvidia/mit-b5) with [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ) for face parsing. For additional options, see the Transformers [Segformer docs](https://huggingface.co/docs/transformers/model_doc/segformer).

> ONNX model for web inference contributed by [Xenova](https://huggingface.co/Xenova).

## Usage in Python

An exhaustive list of labels can be extracted from [config.json](https://huggingface.co/jonathandinu/face-parsing/blob/65972ac96180b397f86fda0980bbe68e6ee01b8f/config.json#L30).

| id  | label      | note              |
| :-: | :--------- | :---------------- |
| 0   | background |                   |
| 1   | skin       |                   |
| 2   | nose       |                   |
| 3   | eye_g      | eyeglasses        |
| 4   | l_eye      | left eye          |
| 5   | r_eye      | right eye         |
| 6   | l_brow     | left eyebrow      |
| 7   | r_brow     | right eyebrow     |
| 8   | l_ear      | left ear          |
| 9   | r_ear      | right ear         |
| 10  | mouth      | area between lips |
| 11  | u_lip      | upper lip         |
| 12  | l_lip      | lower lip         |
| 13  | hair       |                   |
| 14  | hat        |                   |
| 15  | ear_r      | earring           |
| 16  | neck_l     | necklace          |
| 17  | neck       |                   |
| 18  | cloth      | clothing          |

```python
import torch
from torch import nn
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
from PIL import Image
import matplotlib.pyplot as plt
import requests

# convenience expression for automatically determining device
device = (
    "cuda"  # Device for NVIDIA or AMD GPUs
    if torch.cuda.is_available()
    else "mps"  # Device for Apple Silicon (Metal Performance Shaders)
    if torch.backends.mps.is_available()
    else "cpu"
)

# load models
image_processor = SegformerImageProcessor.from_pretrained("jonathandinu/face-parsing")
model = SegformerForSemanticSegmentation.from_pretrained("jonathandinu/face-parsing")
model.to(device)

# expects a PIL.Image or torch.Tensor
url = "https://images.unsplash.com/photo-1539571696357-5a69c17a67c6"
image = Image.open(requests.get(url, stream=True).raw)

# run inference on image
inputs = image_processor(images=image, return_tensors="pt").to(device)
outputs = model(**inputs)
logits = outputs.logits  # shape (batch_size, num_labels, ~height/4, ~width/4)

# resize output to match input image dimensions
upsampled_logits = nn.functional.interpolate(logits,
                                             size=image.size[::-1],  # H x W
                                             mode='bilinear',
                                             align_corners=False)

# get label masks
labels = upsampled_logits.argmax(dim=1)[0]

# move to CPU to visualize in matplotlib
labels_viz = labels.cpu().numpy()
plt.imshow(labels_viz)
plt.show()
```

## Usage in the browser (Transformers.js)

```js
import {
  pipeline,
  env,
} from "https://cdn.jsdelivr.net/npm/@xenova/transformers@2.14.0";

// important to prevent errors since the model files are likely remote on HF hub
env.allowLocalModels = false;

// instantiate image segmentation pipeline with pretrained face parsing model
const model = await pipeline("image-segmentation", "jonathandinu/face-parsing");

// async inference since it could take a few seconds
const output = await model(url);

// each label is a separate mask object
// [
//   { score: null, label: 'background', mask: transformers.js RawImage { ... }}
//   { score: null, label: 'hair', mask: transformers.js RawImage { ... }}
//   ...
// ]

for (const m of output) {
  console.log(`Found ${m.label}`);
  m.mask.save(`${m.label}.png`);
}
```

### p5.js

Since [p5.js](https://p5js.org/) uses an animation loop abstraction, we need to take care when loading the model and making predictions.

```js
// ...

// asynchronously load transformers.js and instantiate model
async function preload() {
  // load transformers.js library with a dynamic import
  const { pipeline, env } = await import(
    "https://cdn.jsdelivr.net/npm/@xenova/transformers@2.14.0"
  );

  // important to prevent errors since the model files are remote on HF hub
  env.allowLocalModels = false;

  // instantiate image segmentation pipeline with pretrained face parsing model
  model = await pipeline("image-segmentation", "jonathandinu/face-parsing");

  print("face-parsing model loaded");
}

// ...
```

[full p5.js example](https://editor.p5js.org/jonathan.ai/sketches/wZn15Dvgh)

### Model Description

- **Developed by:** [Jonathan Dinu](https://twitter.com/jonathandinu)
- **Model type:** Transformer-based semantic segmentation image model
- **License:** non-commercial research and educational purposes
- **Resources for more information:** Transformers docs on [Segformer](https://huggingface.co/docs/transformers/model_doc/segformer) and/or the [original research paper](https://arxiv.org/abs/2105.15203).

## Limitations and Bias

### Bias

While the capabilities of computer vision models are impressive, they can also reinforce or exacerbate social biases. The [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ) dataset used for fine-tuning is large but not necessarily perfectly diverse or representative. Also, the images are exclusively of celebrities.
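The id/label table above maps one-to-one onto the per-pixel class ids produced by the Python example. As a small sketch reusing `labels_viz` and `image` from that snippet, a single region can be isolated as a boolean mask:

```python
import numpy as np

# labels_viz holds one class id per pixel (see the Python example above)
hair_mask = (labels_viz == 13)  # id 13 = hair, per the label table
print(f"hair covers {100 * hair_mask.mean():.1f}% of the image")

# overlay: keep only hair pixels from the original image, zero out the rest
image_np = np.array(image)
hair_only = np.where(hair_mask[..., None], image_np, 0)
```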
laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup
laion
"2024-01-16T22:19:59Z"
117,585
11
open_clip
[ "open_clip", "tensorboard", "safetensors", "zero-shot-image-classification", "clip", "arxiv:2201.03545", "arxiv:2210.08402", "arxiv:1910.04867", "license:mit", "has_space", "region:us" ]
zero-shot-image-classification
"2023-02-11T01:35:52Z"
---
tags:
  - zero-shot-image-classification
  - clip
license: mit
library_name: open_clip
pipeline_tag: zero-shot-image-classification
---

# Model card for CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup

# Table of Contents

1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Training Details](#training-details)
4. [Evaluation](#evaluation)
5. [Acknowledgements](#acknowledgements)
6. [Citation](#citation)

# Model Details

## Model Description

A series of CLIP [ConvNeXt-Large](https://arxiv.org/abs/2201.03545) (w/ extra text depth, vision MLP head) models trained on the LAION-2B (English) subset of [LAION-5B](https://arxiv.org/abs/2210.08402) using [OpenCLIP](https://github.com/mlfoundations/open_clip).

The models utilize:

* the [timm](https://github.com/rwightman/pytorch-image-models) ConvNeXt-Large model (`convnext_large`) as the image tower
* an MLP (`fc - gelu - drop - fc`) head in the vision tower instead of the single projection of other CLIP models
* a text tower with the same width but 4 layers more depth than the ViT-L / RN50x16 models (depth 16, embed dim 768).

This 320x320 resolution model is a soup (weight average) of 3 fine-tunes of [CLIP-convnext_large_d.laion2B-s26B-b102K-augreg](https://huggingface.co/laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg) at a higher resolution. It is an average of 3 fine-tunes from the final checkpoint of the original 256x256 training run w/ an additional ~2-3B samples for each fine-tune and a lower learning rate. Each fine-tune used a different learning rate (1e-4, 6e-5, 5e-5) and a different number of samples (3.2B, 2B, 2.5B).

At 320x320, the ConvNeXt-Large-D is significantly more efficient than the L/14 model at 336x336 that OpenAI fine-tuned: the L/14-336 model has 2.5x the GMACs, 2.8x the activations, and 1.22x the parameters.

| Model | Dataset | Resolution | AugReg | Top-1 ImageNet Zero-Shot (%) |
| ----- | ------- | ---------- | ------ | ---------------------------- |
| [convnext_large_d.laion2b_s26b_b102k-augreg](https://huggingface.co/laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg) | LAION-2B | 256x256 | RRC (0.33, 1.0), RE (0.35), SD (0.1), D(0.1) | 75.9 |
| [convnext_large_d_320.laion2b_s29b_b131k-ft](https://huggingface.co/laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft) | LAION-2B | 320x320 | RRC (0.5, 1.0), RE (0.4), SD (0.1), D(0.0) | 76.6 |
| [convnext_large_d_320.laion2b_s29b_b131k-ft-soup](https://huggingface.co/laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup) | LAION-2B | 320x320 | RRC (0.5, 1.0), RE (0.4), SD (0.1), D(0.0) | 76.9 |

RRC = Random Resize Crop (crop pcts), RE = Random Erasing (prob), SD = Stochastic Depth (prob) -- image tower only, D = Dropout (prob) -- image tower head only

LAION-A = LAION Aesthetic, an ~900M sample subset of LAION-2B with pHash dedupe and aesthetic score filtering.

Model training done by Ross Wightman on the [stability.ai](https://stability.ai/) cluster.

# Uses

As per the original [OpenAI CLIP model card](https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/model-card.md), this model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models.

The OpenAI CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. A brief usage sketch follows below.
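As a concrete illustration of the zero-shot research use described above, here is a minimal sketch following standard OpenCLIP inference; the image path and prompt set are placeholders:

```python
import torch
from PIL import Image
import open_clip

# load this checkpoint through OpenCLIP's Hugging Face hub integration
model, _, preprocess = open_clip.create_model_and_transforms(
    "hf-hub:laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup"
)
tokenizer = open_clip.get_tokenizer(
    "hf-hub:laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup"
)

image = preprocess(Image.open("example.jpg")).unsqueeze(0)  # placeholder image
text = tokenizer(["a diagram", "a dog", "a cat"])

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print(probs)  # per-prompt probabilities for the image
```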
Additionally, the LAION-5B blog (https://laion.ai/blog/laion-5b/) and upcoming paper include additional discussion as it relates specifically to the training dataset.

## Direct Use

Zero-shot image classification, image and text retrieval, among others.

## Downstream Use

Image classification and other image task fine-tuning, linear probe image classification, image generation guiding and conditioning, among others.

## Out-of-Scope Use

As per the OpenAI models, **any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task-specific testing, especially given the variability of CLIP's performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful.

Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use.

Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases.

In addition to the notice above, the LAION-5B dataset used in training of these models has additional considerations; see below.

# Training Details

## Training Data

This model was trained with LAION-2B -- a 2 billion sample English subset of LAION-5B (https://laion.ai/blog/laion-5b/).

**IMPORTANT NOTE:** The motivation behind dataset creation is to democratize research and experimentation around large-scale multi-modal model training and handling of uncurated, large-scale datasets crawled from the publicly available internet. Our recommendation is therefore to use the dataset for research purposes. Be aware that this large-scale dataset is uncurated. Keep in mind that the uncurated nature of the dataset means that collected links may lead to strongly discomforting and disturbing content for a human viewer. Therefore, please use the demo links with caution and at your own risk. It is possible to extract a "safe" subset by filtering out samples based on the safety tags (using a customized trained NSFW classifier that we built). While this strongly reduces the chance of encountering potentially harmful content when viewing, we cannot entirely exclude the possibility of harmful content still being present in safe mode, so the warning holds there as well. We think that providing the dataset openly to broad research and other interested communities will allow for transparent investigation of the benefits that come along with training large-scale models, as well as pitfalls and dangers that may stay unreported or unnoticed when working with closed large datasets that remain restricted to a small community. However, while providing our dataset openly, we do not recommend using it for creating ready-to-go industrial products, as the basic research about general properties and safety of such large-scale models, which we would like to encourage with this release, is still in progress.
## Training Procedure

All 320x320 model fine-tunes were trained with a global batch size of 131072 for 10-16 checkpoint intervals of 203.7M samples, for a total of ~2-3B samples seen over the fine-tune.

For 320x320 models, a slurm script w/ srun below was used on 64 8-GPU (A100 40GB) nodes (Stability).

```
/opt/slurm/sbin/srun --cpu_bind=v --accel-bind=gn python -m training.main \
    --save-frequency 1 \
    --name "convnext_large_320" \
    --pretrained "/runs/convnext_large_256/epoch_128.pt" \
    --resume 'latest' \
    --train-data="pipe:aws s3 cp s3://mybucket/path/laion{00000..xxxxx}.tar -" \
    --train-num-samples 203666042 \
    --dataset-type webdataset \
    --precision amp_bfloat16 \
    --beta2 0.98 \
    --warmup 2000 \
    --batch-size=256 \
    --epochs=12 \
    --dataset-resampled \
    --aug-cfg use_timm=True scale='(0.5, 1.0)' re_prob=0.4 \
    --clip-grad-norm 5.0 \
    --lr 5e-5 \
    --workers=6 \
    --model "convnext_large_d_320" \
    --seed 0 \
    --ddp-static-graph \
    --local-loss \
    --gather-with-grad \
    --grad-checkpointing
```

# Evaluation

Evaluation done with code in the [LAION CLIP Benchmark suite](https://github.com/LAION-AI/CLIP_benchmark).

## Testing Data, Factors & Metrics

### Testing Data

The testing is performed with VTAB+ (a combination of VTAB (https://arxiv.org/abs/1910.04867) with additional robustness datasets) for classification, and COCO and Flickr for retrieval.

## Results

The models achieve between 75.9 and 76.9 top-1 zero-shot accuracy on ImageNet-1k.

Zero-shot curve of the original from-scratch 256x256 training:

![](convnext_large_zero_shot.png)

An initial round of benchmarks has been performed on a wider range of datasets, viewable at https://github.com/LAION-AI/CLIP_benchmark/blob/main/benchmark/results.ipynb

# Acknowledgements

Acknowledging [stability.ai](https://stability.ai/) for compute used to train this model.
# Citation **BibTeX:** LAION-5B ```bibtex @inproceedings{schuhmann2022laionb, title={{LAION}-5B: An open large-scale dataset for training next generation image-text models}, author={Christoph Schuhmann and Romain Beaumont and Richard Vencu and Cade W Gordon and Ross Wightman and Mehdi Cherti and Theo Coombes and Aarush Katta and Clayton Mullis and Mitchell Wortsman and Patrick Schramowski and Srivatsa R Kundurthy and Katherine Crowson and Ludwig Schmidt and Robert Kaczmarczyk and Jenia Jitsev}, booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track}, year={2022}, url={https://openreview.net/forum?id=M3Y74vmsMcY} } ``` OpenCLIP software ```bibtex @software{ilharco_gabriel_2021_5143773, author = {Ilharco, Gabriel and Wortsman, Mitchell and Wightman, Ross and Gordon, Cade and Carlini, Nicholas and Taori, Rohan and Dave, Achal and Shankar, Vaishaal and Namkoong, Hongseok and Miller, John and Hajishirzi, Hannaneh and Farhadi, Ali and Schmidt, Ludwig}, title = {OpenCLIP}, month = jul, year = 2021, note = {If you use this software, please cite it as below.}, publisher = {Zenodo}, version = {0.1}, doi = {10.5281/zenodo.5143773}, url = {https://doi.org/10.5281/zenodo.5143773} } ``` ``` @InProceedings{pmlr-v162-wortsman22a, title = {Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time}, author = {Wortsman, Mitchell and Ilharco, Gabriel and Gadre, Samir Ya and Roelofs, Rebecca and Gontijo-Lopes, Raphael and Morcos, Ari S and Namkoong, Hongseok and Farhadi, Ali and Carmon, Yair and Kornblith, Simon and Schmidt, Ludwig}, booktitle = {Proceedings of the 39th International Conference on Machine Learning}, pages = {23965--23998}, year = {2022}, editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan}, volume = {162}, series = {Proceedings of Machine Learning Research}, month = {17--23 Jul}, publisher = {PMLR}, pdf = {https://proceedings.mlr.press/v162/wortsman22a/wortsman22a.pdf}, url = {https://proceedings.mlr.press/v162/wortsman22a.html} } ``` OpenAI CLIP paper ```bibtex @inproceedings{Radford2021LearningTV, title={Learning Transferable Visual Models From Natural Language Supervision}, author={Alec Radford and Jong Wook Kim and Chris Hallacy and A. Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever}, booktitle={ICML}, year={2021} } ``` ```bibtex @Article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} } ```
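The "-soup" suffix refers to the uniform weight averaging described under Model Details (and in the model soups paper cited above). As a hedged illustration of that averaging step, with placeholder checkpoint filenames rather than the actual training artifacts:

```python
import torch

# placeholder filenames for the three 320x320 fine-tuned checkpoints
checkpoints = ["ft_lr1e-4.pt", "ft_lr6e-5.pt", "ft_lr5e-5.pt"]
state_dicts = [torch.load(p, map_location="cpu") for p in checkpoints]

# uniform soup: average every parameter tensor across the fine-tunes
soup = {
    name: torch.stack([sd[name].float() for sd in state_dicts]).mean(dim=0)
    for name in state_dicts[0]
}
torch.save(soup, "convnext_large_d_320_soup.pt")
```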
schhwmn/mbart-large-50-finetuned-ukr-gec
schhwmn
"2022-04-21T11:33:45Z"
117,493
0
transformers
[ "transformers", "pytorch", "mbart", "text2text-generation", "gec", "mbart-50", "uk", "arxiv:2103.16997", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text2text-generation
"2022-04-15T20:24:24Z"
---
language: uk
tags:
  - gec
  - mbart-50
widget:
  - text: "я й не думав що комп'ютерна лінгвістика це легкоо."
---

This model was finetuned on errorful sentences from the `train` subset of the [UA-GEC](https://github.com/grammarly/ua-gec) corpus, introduced in the paper [UA-GEC: Grammatical Error Correction and Fluency Corpus for the Ukrainian Language](https://arxiv.org/abs/2103.16997). Only sentences containing errors were used: 8,874 sentences for training and 987 sentences for validation.

The training arguments were defined as follows:

```
batch_size = 4
num_train_epochs = 3
learning_rate = 5e-5
weight_decay = 0.01
optim = "adamw_hf"
```
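The card documents only the training setup. For applying the model, here is a minimal inference sketch assuming the standard mBART-50 interface with Ukrainian (`uk_UA`) as both source and target language; the example sentence is the card's own widget text:

```python
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

model = MBartForConditionalGeneration.from_pretrained("schhwmn/mbart-large-50-finetuned-ukr-gec")
tokenizer = MBart50TokenizerFast.from_pretrained(
    "schhwmn/mbart-large-50-finetuned-ukr-gec", src_lang="uk_UA"
)

text = "я й не думав що комп'ютерна лінгвістика це легкоо."
inputs = tokenizer(text, return_tensors="pt")

# force Ukrainian as the decoder's first token and generate the correction
outputs = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["uk_UA"],
    max_length=128,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```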
microsoft/speecht5_tts
microsoft
"2023-11-08T14:37:23Z"
117,392
497
transformers
[ "transformers", "pytorch", "speecht5", "text-to-audio", "audio", "text-to-speech", "dataset:libritts", "arxiv:2110.07205", "arxiv:1910.09700", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
text-to-speech
"2023-02-02T12:56:54Z"
--- license: mit tags: - audio - text-to-speech datasets: - libritts --- # SpeechT5 (TTS task) SpeechT5 model fine-tuned for speech synthesis (text-to-speech) on LibriTTS. This model was introduced in [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei. SpeechT5 was first released in [this repository](https://github.com/microsoft/SpeechT5/), [original weights](https://huggingface.co/mechanicalsea/speecht5-tts). The license used is [MIT](https://github.com/microsoft/SpeechT5/blob/main/LICENSE). ## Model Description Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder. Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder. Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification. - **Developed by:** Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei. - **Shared by [optional]:** [Matthijs Hollemans](https://huggingface.co/Matthijs) - **Model type:** text-to-speech - **Language(s) (NLP):** [More Information Needed] - **License:** [MIT](https://github.com/microsoft/SpeechT5/blob/main/LICENSE) - **Finetuned from model [optional]:** [More Information Needed] ## Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [https://github.com/microsoft/SpeechT5/] - **Paper:** [https://arxiv.org/pdf/2110.07205.pdf] - **Blog Post:** [https://huggingface.co/blog/speecht5] - **Demo:** [https://huggingface.co/spaces/Matthijs/speecht5-tts-demo] # Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ## 🤗 Transformers Usage You can run SpeechT5 TTS locally with the 🤗 Transformers library. 1. First install the 🤗 [Transformers library](https://github.com/huggingface/transformers), sentencepiece, soundfile and datasets(optional): ``` pip install --upgrade pip pip install --upgrade transformers sentencepiece datasets[audio] ``` 2. Run inference via the `Text-to-Speech` (TTS) pipeline. You can access the SpeechT5 model via the TTS pipeline in just a few lines of code! 
```python
import torch
import soundfile as sf
from transformers import pipeline
from datasets import load_dataset

synthesiser = pipeline("text-to-speech", "microsoft/speecht5_tts")

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
# You can replace this embedding with your own as well.

speech = synthesiser("Hello, my dog is cooler than you!", forward_params={"speaker_embeddings": speaker_embedding})

sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
```

3. Run inference via the Transformers modelling code - You can use the processor + generate code to convert text into a mono 16 kHz speech waveform for more fine-grained control.

```python
import torch
import soundfile as sf
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from datasets import load_dataset

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")

# load xvector containing speaker's voice characteristics from a dataset
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

sf.write("speech.wav", speech.numpy(), samplerate=16000)
```

### Fine-tuning the Model

Refer to [this Colab notebook](https://colab.research.google.com/drive/1i7I5pzBcU3WDFarDnzweIj4-sVVoIUFJ) for an example of how to fine-tune SpeechT5 for TTS on a different dataset or a new language.

## Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

You can use this model for speech synthesis. See the [model hub](https://huggingface.co/models?search=speecht5) to look for fine-tuned versions on a task that interests you.

## Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

## Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

# Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

## Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

# Training Details

## Training Data

<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

LibriTTS

## Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure.
--> ### Preprocessing [optional] Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. ### Training hyperparameters - **Precision:** [More Information Needed] <!--fp16, bf16, fp8, fp32 --> - **Regime:** [More Information Needed] <!--mixed precision or not --> ### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] # Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ## Testing Data, Factors & Metrics ### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] ### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] ### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ## Results [More Information Needed] ### Summary # Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification. # Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] # Technical Specifications [optional] ## Model Architecture and Objective The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder. ## Compute Infrastructure [More Information Needed] ### Hardware [More Information Needed] ### Software [More Information Needed] # Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** ```bibtex @inproceedings{ao-etal-2022-speecht5, title = {{S}peech{T}5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing}, author = {Ao, Junyi and Wang, Rui and Zhou, Long and Wang, Chengyi and Ren, Shuo and Wu, Yu and Liu, Shujie and Ko, Tom and Li, Qing and Zhang, Yu and Wei, Zhihua and Qian, Yao and Li, Jinyu and Wei, Furu}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = {May}, year = {2022}, pages={5723--5738}, } ``` # Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. 
--> - **text-to-speech** to synthesize audio # More Information [optional] [More Information Needed] # Model Card Authors [optional] Disclaimer: The team releasing SpeechT5 did not write a model card for this model so this model card has been written by the Hugging Face team. # Model Card Contact [More Information Needed]
snrspeaks/KeyPhraseTransformer
snrspeaks
"2022-03-25T13:05:44Z"
116,990
8
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-03-25T12:23:25Z"
---
license: mit
---
WinKawaks/vit-tiny-patch16-224
WinKawaks
"2023-03-30T14:56:06Z"
116,938
9
transformers
[ "transformers", "pytorch", "safetensors", "vit", "image-classification", "vision", "dataset:imagenet", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2022-03-02T23:29:05Z"
---
license: apache-2.0
tags:
  - vision
  - image-classification
datasets:
  - imagenet
widget:
  - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
    example_title: Tiger
  - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
    example_title: Teapot
  - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
    example_title: Palace
---

Google didn't publish vit-tiny and vit-small model checkpoints on Hugging Face. The weights here were converted from the [timm repository](https://github.com/rwightman/pytorch-image-models). This model is used in the same way as [ViT-base](https://huggingface.co/google/vit-base-patch16-224).

Note that the safetensors model requires a torch 2.0 environment.
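Since the card defers to the ViT-base instructions, here is a minimal classification sketch following that standard usage pattern (using the card's own Tiger widget image); this is an illustrative example, not part of the original card:

```python
import requests
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification

url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = ViTImageProcessor.from_pretrained("WinKawaks/vit-tiny-patch16-224")
model = ViTForImageClassification.from_pretrained("WinKawaks/vit-tiny-patch16-224")

inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits

# print the top ImageNet class
print(model.config.id2label[logits.argmax(-1).item()])
```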
echarlaix/tiny-random-mistral
echarlaix
"2023-10-06T09:06:13Z"
116,748
1
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-06T08:53:48Z"
---
license: apache-2.0
---
databricks/dbrx-instruct
databricks
"2024-04-11T21:30:56Z"
116,682
1,006
transformers
[ "transformers", "safetensors", "dbrx", "text-generation", "conversational", "custom_code", "arxiv:2211.15841", "arxiv:2304.11277", "license:other", "autotrain_compatible", "has_space", "region:us" ]
text-generation
"2024-03-26T20:07:24Z"
--- extra_gated_heading: You need to share contact information with Databricks to access this model extra_gated_prompt: >- ### DBRX Terms of Use Use of DBRX is governed by the [Databricks Open Model License](https://www.databricks.com/legal/open-model-license) and the [Databricks Open Model Acceptable Use Policy](https://www.databricks.com/legal/acceptable-use-policy-open-model). extra_gated_fields: First Name: text Last Name: text Organization: text By clicking 'Submit' below, I accept the terms of the license and acknowledge that the information I provide will be collected, stored, processed, and shared in accordance with Databricks' Privacy Notice and I understand I can update my preferences at any time: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed, and shared in accordance with Databricks [Privacy Notice](https://www.databricks.com/legal/privacynotice). extra_gated_button_content: Submit inference: false license: other license_name: databricks-open-model-license license_link: https://www.databricks.com/legal/open-model-license --- # DBRX Instruct * DBRX Instruct is a mixture-of-experts (MoE) large language model trained from scratch by Databricks. DBRX Instruct specializes in few-turn interactions. * We are releasing both DBRX Instruct and DBRX Base, the pretrained base model which underlies it, under [an open license](https://www.databricks.com/legal/open-model-license). * This is the repository for DBRX Instruct. DBRX Base can be found [here](https://huggingface.co/databricks/dbrx-base). * For full details on the DBRX models, please read our [technical blog post](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm). ## Model Overview DBRX is a [transformer-based](https://www.isattentionallyouneed.com/) decoder-only large language model (LLM) that was trained using next-token prediction. It uses a *fine-grained* mixture-of-experts (MoE) architecture with 132B total parameters of which 36B parameters are active on any input. It was pre-trained on 12T tokens of text and code data. Compared to other open MoE models like Mixtral-8x7B and Grok-1, DBRX is fine-grained, meaning it uses a larger number of smaller experts. DBRX has 16 experts and chooses 4, while Mixtral-8x7B and Grok-1 have 8 experts and choose 2. This provides 65x more possible combinations of experts and we found that this improves model quality. DBRX uses rotary position encodings (RoPE), gated linear units (GLU), and grouped query attention (GQA). It uses the GPT-4 tokenizer as provided in the [tiktoken](https://github.com/openai/tiktoken) repository. We made these choices based on exhaustive evaluation and scaling experiments. DBRX was pretrained on 12T tokens of carefully curated data and a maximum context length of 32K tokens. We estimate that this data is at least 2x better token-for-token than the data we used to pretrain the MPT family of models. This new dataset was developed using the full suite of Databricks tools, including Apache Spark™ and Databricks notebooks for data processing, and Unity Catalog for data management and governance. We used curriculum learning for pretraining, changing the data mix during training in ways we found to substantially improve model quality. * **Inputs:** DBRX only accepts text-based inputs and accepts a context length of up to 32768 tokens. * **Outputs:** DBRX only produces text-based outputs. 
* **Model Architecture:** More detailed information about DBRX Instruct and DBRX Base can be found in our [technical blog post](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm).
* **License:** [Databricks Open Model License](https://www.databricks.com/legal/open-model-license)
* **Acceptable Use Policy:** [Databricks Open Model Acceptable Use Policy](https://www.databricks.com/legal/acceptable-use-policy-open-model)
* **Version:** 1.0
* **Owner:** Databricks, Inc.

## Usage

There are several general ways to use the DBRX models:

* DBRX Base and DBRX Instruct are available for download on HuggingFace (see our Quickstart guide below). This is the HF repository for DBRX Instruct; DBRX Base can be found [here](https://huggingface.co/databricks/dbrx-base).
* The DBRX model repository can be found on GitHub [here](https://github.com/databricks/dbrx).
* DBRX Base and DBRX Instruct are available with [Databricks Foundation Model APIs](https://docs.databricks.com/en/machine-learning/foundation-models/index.html) via both *Pay-per-token* and *Provisioned Throughput* endpoints. These are enterprise-ready deployments.
* For more information on how to fine-tune using LLM-Foundry, please take a look at our LLM pretraining and fine-tuning [documentation](https://github.com/mosaicml/llm-foundry/blob/main/scripts/train/README.md).

## Quickstart Guide

**NOTE: This is DBRX Instruct, and has been instruction finetuned.** If you are looking for the base model, please use [DBRX Base](https://huggingface.co/databricks/dbrx-base).

Getting started with DBRX models is easy with the `transformers` library. The model requires ~264GB of RAM and the following packages:

```bash
pip install "transformers>=4.39.2" "tiktoken>=0.6.0"
```

If you'd like to speed up download time, you can use the `hf_transfer` package as described by Huggingface [here](https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads).

```bash
pip install hf_transfer
export HF_HUB_ENABLE_HF_TRANSFER=1
```

You will need to request access to this repository to download the model. Once this is granted, [obtain an access token](https://huggingface.co/docs/hub/en/security-tokens) with `read` permission, and supply the token below.

### Run the model on multiple GPUs:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct", trust_remote_code=True, token="hf_YOUR_TOKEN")
model = AutoModelForCausalLM.from_pretrained("databricks/dbrx-instruct", device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True, token="hf_YOUR_TOKEN")

input_text = "What does it take to build a great LLM?"
messages = [{"role": "user", "content": input_text}]
input_ids = tokenizer.apply_chat_template(messages, return_dict=True, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=200)
print(tokenizer.decode(outputs[0]))
```

If your GPU system supports [FlashAttention2](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2), you can add `attn_implementation="flash_attention_2"` as a keyword to `AutoModelForCausalLM.from_pretrained()` to achieve faster inference.

## Limitations and Ethical Considerations

### Training Dataset Limitations

The DBRX models were trained on 12T tokens of text, with a knowledge cutoff date of December 2023.

The training mix used for DBRX contains both natural-language and code examples.
The vast majority of our training data is in the English language. We did not test DBRX for non-English proficiency. Therefore, DBRX should be considered a generalist model for text-based use in the English language.

DBRX does not have multimodal capabilities.

### Associated Risks and Recommendations

All foundation models are novel technologies that carry various risks, and may output information that is inaccurate, incomplete, biased, or offensive.
Users should exercise judgment and evaluate such output for accuracy and appropriateness for their desired use case before using or sharing it.
Databricks recommends [using retrieval augmented generation (RAG)](https://www.databricks.com/glossary/retrieval-augmented-generation-rag) in scenarios where accuracy and fidelity are important.
We also recommend that anyone using or fine-tuning either DBRX Base or DBRX Instruct perform additional testing around safety in the context of their particular application and domain.

## Intended Uses

### Intended Use Cases

The DBRX models are open, general-purpose LLMs intended and licensed for both commercial and research applications.
They can be further fine-tuned for various domain-specific natural language and coding tasks.
DBRX Instruct can be used as an off-the-shelf model for few-turn question answering related to general English-language and coding tasks.

Please review the Associated Risks section above, as well as the [Databricks Open Model License](https://www.databricks.com/legal/open-model-license) and [Databricks Open Model Acceptable Use Policy](https://www.databricks.com/legal/acceptable-use-policy-open-model) for further information about permissible uses of DBRX Base and its derivatives.

### Out-of-Scope Use Cases

DBRX models are not intended to be used out-of-the-box in non-English languages and do not support native code execution or other forms of function-calling.
DBRX models should not be used in any manner that violates applicable laws or regulations or in any other way that is prohibited by the [Databricks Open Model License](https://www.databricks.com/legal/open-model-license) and [Databricks Open Model Acceptable Use Policy](https://www.databricks.com/legal/acceptable-use-policy-open-model).

## Training Stack

MoE models are complicated to train, and the training of DBRX Base and DBRX Instruct was heavily supported by Databricks' infrastructure for data processing and large-scale LLM training (e.g., [Composer](https://github.com/mosaicml/composer), [Streaming](https://github.com/mosaicml/streaming), [Megablocks](https://github.com/stanford-futuredata/megablocks), and [LLM Foundry](https://github.com/mosaicml/llm-foundry)).

Composer is our core library for large-scale training.
It provides an optimized training loop, easy [checkpointing](https://docs.mosaicml.com/projects/composer/en/latest/trainer/checkpointing.html) and [logging](https://docs.mosaicml.com/projects/composer/en/latest/trainer/logging.html#wood-logging), [FSDP](https://pytorch.org/docs/stable/fsdp.html)-based [model sharding](https://docs.mosaicml.com/projects/composer/en/latest/notes/distributed_training.html#fullyshardeddataparallel-fsdp), convenient [abstractions](https://docs.mosaicml.com/projects/composer/en/latest/trainer/time.html), extreme customizability via [callbacks](https://docs.mosaicml.com/projects/composer/en/latest/trainer/callbacks.html), and more.

Streaming enables fast, low-cost, and scalable training on large datasets from cloud storage.
It handles a variety of challenges around deterministic resumption as node counts change, avoiding redundant downloads across devices, high-quality shuffling at scale, sample-level random access, and speed.

Megablocks is a lightweight library for MoE training. Crucially, it supports "dropless MoE," which avoids inefficient padding and is intended to provide deterministic outputs for a given sequence no matter what other sequences are in the batch.

LLM Foundry ties all of these libraries together to create a simple LLM pretraining, fine-tuning, and inference experience.

DBRX was trained using proprietary optimized versions of the above open source libraries, along with our [LLM training platform](https://www.databricks.com/product/machine-learning/mosaic-ai-training).

## Evaluation

We find that DBRX outperforms established open-source and open-weight base models on the [Databricks Model Gauntlet](https://www.databricks.com/blog/llm-evaluation-for-icl), the [Hugging Face Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), and HumanEval.
The Databricks Model Gauntlet measures performance on more than 30 tasks across six categories: world knowledge, common sense reasoning, language understanding, reading comprehension, symbolic problem solving, and programming.
The Hugging Face Open LLM Leaderboard measures the average of ARC-Challenge, HellaSwag, MMLU, TruthfulQA, Winogrande, and GSM8K.
HumanEval measures coding ability.

Full evaluation details can be found in our [technical blog post](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm).

## Acknowledgements

The DBRX models were made possible thanks in large part to the open-source community, especially:
* The [MegaBlocks](https://arxiv.org/abs/2211.15841) library, which established a foundation for our MoE implementation.
* [PyTorch FSDP](https://arxiv.org/abs/2304.11277), which we built on for distributed training.
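To make the fine-grained routing described in the Model Overview above concrete, here is a toy top-4-of-16 router in PyTorch. This is an illustrative sketch only, not Databricks' implementation: the router weight, shapes, and renormalization scheme are assumptions.

```python
import torch
import torch.nn.functional as F

def route_tokens(hidden, router_weight, top_k=4):
    """Toy top-k MoE routing: each token picks top_k experts.

    hidden: (tokens, d_model); router_weight: (d_model, n_experts).
    Returns per-token expert indices and renormalized gate weights.
    """
    logits = hidden @ router_weight                  # (tokens, n_experts)
    probs = F.softmax(logits, dim=-1)
    gates, experts = probs.topk(top_k, dim=-1)       # (tokens, top_k)
    gates = gates / gates.sum(dim=-1, keepdim=True)  # renormalize over chosen experts
    return experts, gates

# Example: 8 tokens of width 32 routed over 16 experts, 4 active per token.
hidden = torch.randn(8, 32)
router_weight = torch.randn(32, 16)
experts, gates = route_tokens(hidden, router_weight)
print(experts.shape, gates.shape)  # torch.Size([8, 4]) torch.Size([8, 4])
```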
Salesforce/codet5p-220m
Salesforce
"2023-05-16T00:33:56Z"
116,420
21
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "arxiv:2305.07922", "license:bsd-3-clause", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2023-05-13T10:34:57Z"
---
license: bsd-3-clause
---

# CodeT5+ 220M

## Model description

[CodeT5+](https://github.com/salesforce/CodeT5/tree/main/CodeT5+) is a new family of open code large language models with an encoder-decoder architecture that can flexibly operate in different modes (i.e. _encoder-only_, _decoder-only_, and _encoder-decoder_) to support a wide range of code understanding and generation tasks.
It is introduced in the paper:

[CodeT5+: Open Code Large Language Models for Code Understanding and Generation](https://arxiv.org/pdf/2305.07922.pdf) by [Yue Wang](https://yuewang-cuhk.github.io/)\*, [Hung Le](https://sites.google.com/view/henryle2018/home?pli=1)\*, [Akhilesh Deepak Gotmare](https://akhileshgotmare.github.io/), [Nghi D.Q. Bui](https://bdqnghi.github.io/), [Junnan Li](https://sites.google.com/site/junnanlics), [Steven C.H. Hoi](https://sites.google.com/view/stevenhoi/home) (\* indicates equal contribution).

Compared to the original CodeT5 family (base: `220M`, large: `770M`), CodeT5+ is pretrained with a diverse set of pretraining tasks including _span denoising_, _causal language modeling_, _contrastive learning_, and _text-code matching_ to learn rich representations from both unimodal code data and bimodal code-text data.
Additionally, it employs a simple yet effective _compute-efficient pretraining_ method to initialize the model components with frozen off-the-shelf LLMs such as [CodeGen](https://github.com/salesforce/CodeGen) to efficiently scale up the model (i.e. `2B`, `6B`, `16B`), and adopts a "shallow encoder and deep decoder" architecture.
Furthermore, it is instruction-tuned to align with natural language instructions (see our InstructCodeT5+ 16B) following [Code Alpaca](https://github.com/sahil280114/codealpaca).

## How to use

This model can be easily loaded using the `T5ForConditionalGeneration` functionality and employs the same tokenizer as the original [CodeT5](https://github.com/salesforce/CodeT5).

```python
from transformers import T5ForConditionalGeneration, AutoTokenizer

checkpoint = "Salesforce/codet5p-220m"
device = "cuda"  # for GPU usage or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = T5ForConditionalGeneration.from_pretrained(checkpoint).to(device)

inputs = tokenizer.encode("def print_hello_world():<extra_id_0>", return_tensors="pt").to(device)
outputs = model.generate(inputs, max_length=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# ==> prints "Hello World"
```

## Pretraining data

This checkpoint is trained on the stricter permissive subset of the deduplicated version of the [github-code dataset](https://huggingface.co/datasets/codeparrot/github-code).
The data is preprocessed by retaining only permissively licensed code ("mit", "apache-2", "bsd-3-clause", "bsd-2-clause", "cc0-1.0", "unlicense", "isc").
Supported languages (9 in total) are as follows: `c`, `c++`, `c-sharp`, `go`, `java`, `javascript`, `php`, `python`, `ruby`.

## Training procedure

This checkpoint is trained on unimodal code data in the first-stage pretraining, which covers a diverse set of pretraining tasks: _span denoising_ and two variants of _causal language modeling_.
Please refer to the paper for more details.

## Evaluation results

CodeT5+ models have been comprehensively evaluated on a wide range of code understanding and generation tasks in various settings: _zero-shot_, _finetuning_, and _instruction-tuning_.
Specifically, CodeT5+ yields substantial performance gains on many downstream tasks compared to SoTA baselines, e.g., 8 text-to-code retrieval tasks (+3.2 avg. MRR), 2 line-level code completion tasks (+2.1 avg. Exact Match), and 2 retrieval-augmented code generation tasks (+5.8 avg. BLEU-4).
In 2 math programming tasks on MathQA-Python and GSM8K-Python, CodeT5+ models with fewer than a billion parameters significantly outperform many LLMs of up to 137B parameters.
Particularly, in the zero-shot text-to-code generation task on the HumanEval benchmark, InstructCodeT5+ 16B sets new SoTA results of 35.0% pass@1 and 54.5% pass@10 against other open code LLMs, even surpassing the closed-source OpenAI code-cushman-001 model.

Please refer to the [paper](https://arxiv.org/pdf/2305.07922.pdf) for more details.

## BibTeX entry and citation info

```bibtex
@article{wang2023codet5plus,
  title={CodeT5+: Open Code Large Language Models for Code Understanding and Generation},
  author={Wang, Yue and Le, Hung and Gotmare, Akhilesh Deepak and Bui, Nghi D.Q. and Li, Junnan and Hoi, Steven C. H.},
  journal={arXiv preprint},
  year={2023}
}
```
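Since the model description notes that CodeT5+ can also operate in encoder-only mode, here is a minimal sketch of extracting a code embedding from this checkpoint's encoder. The usage pattern is an assumption based on the standard T5 API, and the mean-pooling step is an illustrative choice rather than an official CodeT5+ recipe.

```python
import torch
from transformers import T5ForConditionalGeneration, AutoTokenizer

checkpoint = "Salesforce/codet5p-220m"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = T5ForConditionalGeneration.from_pretrained(checkpoint)

code = "def add(a, b):\n    return a + b"
inputs = tokenizer(code, return_tensors="pt")
with torch.no_grad():
    # Run only the encoder stack to get contextual token states.
    hidden = model.encoder(**inputs).last_hidden_state  # (1, seq_len, 768)

# Mean-pool over non-padding tokens to get one vector per snippet
# (pooling strategy is an assumption, not prescribed by the paper).
mask = inputs.attention_mask.unsqueeze(-1)
embedding = (hidden * mask).sum(dim=1) / mask.sum(dim=1)
print(embedding.shape)  # torch.Size([1, 768])
```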
cross-encoder/stsb-roberta-base
cross-encoder
"2021-08-05T08:41:58Z"
115,902
2
transformers
[ "transformers", "pytorch", "jax", "roberta", "text-classification", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
---
license: apache-2.0
---

# Cross-Encoder for Semantic Textual Similarity

This model was trained using the [SentenceTransformers](https://sbert.net) [Cross-Encoder](https://www.sbert.net/examples/applications/cross-encoder/README.html) class.

## Training Data

This model was trained on the [STS benchmark dataset](http://ixa2.si.ehu.eus/stswiki/index.php/STSbenchmark). The model predicts a score between 0 and 1 for the semantic similarity of two sentences.

## Usage and Performance

Pre-trained models can be used like this:
```python
from sentence_transformers import CrossEncoder

model = CrossEncoder('cross-encoder/stsb-roberta-base')
scores = model.predict([('Sentence 1', 'Sentence 2'), ('Sentence 3', 'Sentence 4')])
```

The model will predict scores for the pairs `('Sentence 1', 'Sentence 2')` and `('Sentence 3', 'Sentence 4')`.

You can also use this model without sentence_transformers, by loading it with the Transformers `AutoModelForSequenceClassification` class.
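As a sketch of that plain-Transformers route: the sigmoid below mirrors what the CrossEncoder class applies for single-label models, and the sentence pair is illustrative.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "cross-encoder/stsb-roberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()

# Encode the two sentences of each pair together, as a cross-encoder expects.
features = tokenizer(
    ["A man is eating food."], ["A man is eating a piece of bread."],
    padding=True, truncation=True, return_tensors="pt",
)
with torch.no_grad():
    logits = model(**features).logits  # shape: (batch, 1) for this single-label head

# Map the raw logit into the 0-1 similarity range, matching CrossEncoder.predict.
scores = torch.sigmoid(logits).squeeze(-1)
print(scores)
```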
intfloat/e5-mistral-7b-instruct
intfloat
"2024-01-16T07:14:13Z"
115,696
372
transformers
[ "transformers", "pytorch", "safetensors", "mistral", "feature-extraction", "mteb", "en", "arxiv:2401.00368", "arxiv:2104.08663", "arxiv:2210.07316", "license:mit", "model-index", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
feature-extraction
"2023-12-20T10:17:02Z"
--- tags: - mteb model-index: - name: e5-mistral-7b-instruct results: - task: type: STS dataset: type: C-MTEB/AFQMC name: MTEB AFQMC config: default split: validation revision: None metrics: - type: cos_sim_pearson value: 37.863226091673866 - type: cos_sim_spearman value: 38.98733013335281 - type: euclidean_pearson value: 37.51783380497874 - type: euclidean_spearman value: 38.98733012753365 - type: manhattan_pearson value: 37.26706888081721 - type: manhattan_spearman value: 38.709750161903834 - task: type: STS dataset: type: C-MTEB/ATEC name: MTEB ATEC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 43.33924583134623 - type: cos_sim_spearman value: 42.84316155158754 - type: euclidean_pearson value: 45.62709879515238 - type: euclidean_spearman value: 42.843155921732404 - type: manhattan_pearson value: 45.4786950991229 - type: manhattan_spearman value: 42.657334751855984 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 78.68656716417911 - type: ap value: 41.71522322900398 - type: f1 value: 72.37207703532552 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (de) config: de split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 74.04710920770879 - type: ap value: 83.42622221864045 - type: f1 value: 72.14388257905772 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en-ext) config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 77.93103448275862 - type: ap value: 26.039284760509513 - type: f1 value: 64.81092954450712 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (ja) config: ja split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 77.21627408993577 - type: ap value: 24.876490553983036 - type: f1 value: 63.8773359684989 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 95.90679999999999 - type: ap value: 94.32357863164454 - type: f1 value: 95.90485634708557 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 55.786 - type: f1 value: 55.31211995815146 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (de) config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 53.26 - type: f1 value: 52.156230111544986 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (es) config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 50.33 - type: f1 value: 49.195023008878145 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (fr) config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 49.3 - type: f1 value: 
48.434470184108 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (ja) config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.68599999999999 - type: f1 value: 47.62681775202072 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (zh) config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 46.238 - type: f1 value: 45.014030559653705 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 36.486000000000004 - type: map_at_10 value: 53.076 - type: map_at_100 value: 53.657999999999994 - type: map_at_1000 value: 53.659 - type: map_at_3 value: 48.234 - type: map_at_5 value: 51.121 - type: mrr_at_1 value: 37.269000000000005 - type: mrr_at_10 value: 53.335 - type: mrr_at_100 value: 53.916 - type: mrr_at_1000 value: 53.918 - type: mrr_at_3 value: 48.518 - type: mrr_at_5 value: 51.406 - type: ndcg_at_1 value: 36.486000000000004 - type: ndcg_at_10 value: 61.882000000000005 - type: ndcg_at_100 value: 64.165 - type: ndcg_at_1000 value: 64.203 - type: ndcg_at_3 value: 52.049 - type: ndcg_at_5 value: 57.199 - type: precision_at_1 value: 36.486000000000004 - type: precision_at_10 value: 8.982999999999999 - type: precision_at_100 value: 0.9939999999999999 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 21.029 - type: precision_at_5 value: 15.092 - type: recall_at_1 value: 36.486000000000004 - type: recall_at_10 value: 89.82900000000001 - type: recall_at_100 value: 99.36 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 63.087 - type: recall_at_5 value: 75.46199999999999 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 50.45119266859667 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 45.4958298992051 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 66.98177472838887 - type: mrr value: 79.91854636591478 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 87.67086498650698 - type: cos_sim_spearman value: 85.54773239564638 - type: euclidean_pearson value: 86.48229161588425 - type: euclidean_spearman value: 85.54773239564638 - type: manhattan_pearson value: 86.67533327742343 - type: manhattan_spearman value: 85.76099026691983 - task: type: STS dataset: type: C-MTEB/BQ name: MTEB BQ config: default split: test revision: None metrics: - type: cos_sim_pearson value: 50.31998888922809 - type: cos_sim_spearman value: 50.6369940530675 - type: euclidean_pearson value: 50.055544636296055 - type: euclidean_spearman value: 50.63699405154838 - type: manhattan_pearson value: 50.00739378036807 - type: manhattan_spearman value: 50.607237418676945 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (de-en) config: de-en split: test revision: 
d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.5615866388309 - type: f1 value: 99.49895615866389 - type: precision value: 99.46764091858039 - type: recall value: 99.5615866388309 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (fr-en) config: fr-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.19656614571869 - type: f1 value: 99.08650671362535 - type: precision value: 99.0314769975787 - type: recall value: 99.19656614571869 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (ru-en) config: ru-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 98.0256321440942 - type: f1 value: 97.83743216718624 - type: precision value: 97.74390947927492 - type: recall value: 98.0256321440942 - task: type: BitextMining dataset: type: mteb/bucc-bitext-mining name: MTEB BUCC (zh-en) config: zh-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.26276987888363 - type: f1 value: 99.22766368264 - type: precision value: 99.21011058451816 - type: recall value: 99.26276987888363 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 88.22727272727272 - type: f1 value: 88.17411732496673 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 43.530637846246975 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 40.23505728593893 - task: type: Clustering dataset: type: C-MTEB/CLSClusteringP2P name: MTEB CLSClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 44.419028279451275 - task: type: Clustering dataset: type: C-MTEB/CLSClusteringS2S name: MTEB CLSClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 42.5820277929776 - task: type: Reranking dataset: type: C-MTEB/CMedQAv1-reranking name: MTEB CMedQAv1 config: default split: test revision: None metrics: - type: map value: 77.67811726152972 - type: mrr value: 80.99003968253969 - task: type: Reranking dataset: type: C-MTEB/CMedQAv2-reranking name: MTEB CMedQAv2 config: default split: test revision: None metrics: - type: map value: 78.66055354534922 - type: mrr value: 81.66119047619047 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.162333333333333 - type: map_at_10 value: 37.22291666666667 - type: map_at_100 value: 38.56733333333333 - type: map_at_1000 value: 38.684250000000006 - type: map_at_3 value: 34.22858333333333 - type: map_at_5 value: 35.852500000000006 - type: mrr_at_1 value: 32.459833333333336 - type: mrr_at_10 value: 41.65358333333333 - type: mrr_at_100 value: 42.566916666666664 - type: mrr_at_1000 value: 42.61766666666667 - type: mrr_at_3 value: 39.210499999999996 - type: mrr_at_5 value: 40.582166666666666 - type: ndcg_at_1 value: 32.459833333333336 - type: ndcg_at_10 value: 42.96758333333333 - type: ndcg_at_100 value: 48.5065 - type: ndcg_at_1000 value: 
50.556583333333336 - type: ndcg_at_3 value: 38.004416666666664 - type: ndcg_at_5 value: 40.25916666666667 - type: precision_at_1 value: 32.459833333333336 - type: precision_at_10 value: 7.664583333333333 - type: precision_at_100 value: 1.2349999999999999 - type: precision_at_1000 value: 0.15966666666666668 - type: precision_at_3 value: 17.731166666666663 - type: precision_at_5 value: 12.575333333333335 - type: recall_at_1 value: 27.162333333333333 - type: recall_at_10 value: 55.44158333333334 - type: recall_at_100 value: 79.56966666666666 - type: recall_at_1000 value: 93.45224999999999 - type: recall_at_3 value: 41.433083333333336 - type: recall_at_5 value: 47.31108333333333 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 16.539 - type: map_at_10 value: 28.494999999999997 - type: map_at_100 value: 30.568 - type: map_at_1000 value: 30.741000000000003 - type: map_at_3 value: 23.846999999999998 - type: map_at_5 value: 26.275 - type: mrr_at_1 value: 37.394 - type: mrr_at_10 value: 50.068 - type: mrr_at_100 value: 50.727 - type: mrr_at_1000 value: 50.751000000000005 - type: mrr_at_3 value: 46.938 - type: mrr_at_5 value: 48.818 - type: ndcg_at_1 value: 37.394 - type: ndcg_at_10 value: 38.349 - type: ndcg_at_100 value: 45.512 - type: ndcg_at_1000 value: 48.321 - type: ndcg_at_3 value: 32.172 - type: ndcg_at_5 value: 34.265 - type: precision_at_1 value: 37.394 - type: precision_at_10 value: 11.927999999999999 - type: precision_at_100 value: 1.966 - type: precision_at_1000 value: 0.25 - type: precision_at_3 value: 24.126 - type: precision_at_5 value: 18.306 - type: recall_at_1 value: 16.539 - type: recall_at_10 value: 44.504 - type: recall_at_100 value: 68.605 - type: recall_at_1000 value: 84.1 - type: recall_at_3 value: 29.008 - type: recall_at_5 value: 35.58 - task: type: Retrieval dataset: type: C-MTEB/CmedqaRetrieval name: MTEB CmedqaRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 19.482 - type: map_at_10 value: 28.622999999999998 - type: map_at_100 value: 30.262 - type: map_at_1000 value: 30.432 - type: map_at_3 value: 25.647 - type: map_at_5 value: 27.128000000000004 - type: mrr_at_1 value: 30.408 - type: mrr_at_10 value: 37.188 - type: mrr_at_100 value: 38.196000000000005 - type: mrr_at_1000 value: 38.273 - type: mrr_at_3 value: 35.067 - type: mrr_at_5 value: 36.124 - type: ndcg_at_1 value: 30.408 - type: ndcg_at_10 value: 34.215 - type: ndcg_at_100 value: 41.349999999999994 - type: ndcg_at_1000 value: 44.689 - type: ndcg_at_3 value: 30.264999999999997 - type: ndcg_at_5 value: 31.572 - type: precision_at_1 value: 30.408 - type: precision_at_10 value: 7.6770000000000005 - type: precision_at_100 value: 1.352 - type: precision_at_1000 value: 0.178 - type: precision_at_3 value: 17.213 - type: precision_at_5 value: 12.198 - type: recall_at_1 value: 19.482 - type: recall_at_10 value: 42.368 - type: recall_at_100 value: 72.694 - type: recall_at_1000 value: 95.602 - type: recall_at_3 value: 30.101 - type: recall_at_5 value: 34.708 - task: type: PairClassification dataset: type: C-MTEB/CMNLI name: MTEB Cmnli config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 71.16055321707758 - type: cos_sim_ap value: 80.21073839711723 - type: cos_sim_f1 value: 72.9740932642487 - type: cos_sim_precision value: 65.53136050623488 - type: cos_sim_recall value: 82.3240589198036 - type: dot_accuracy value: 71.16055321707758 - type: dot_ap 
value: 80.212299264122 - type: dot_f1 value: 72.9740932642487 - type: dot_precision value: 65.53136050623488 - type: dot_recall value: 82.3240589198036 - type: euclidean_accuracy value: 71.16055321707758 - type: euclidean_ap value: 80.21076298680417 - type: euclidean_f1 value: 72.9740932642487 - type: euclidean_precision value: 65.53136050623488 - type: euclidean_recall value: 82.3240589198036 - type: manhattan_accuracy value: 70.71557426337944 - type: manhattan_ap value: 79.93448977199749 - type: manhattan_f1 value: 72.83962726826877 - type: manhattan_precision value: 62.7407908077053 - type: manhattan_recall value: 86.81318681318682 - type: max_accuracy value: 71.16055321707758 - type: max_ap value: 80.212299264122 - type: max_f1 value: 72.9740932642487 - task: type: Retrieval dataset: type: C-MTEB/CovidRetrieval name: MTEB CovidRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 60.643 - type: map_at_10 value: 69.011 - type: map_at_100 value: 69.533 - type: map_at_1000 value: 69.545 - type: map_at_3 value: 67.167 - type: map_at_5 value: 68.12700000000001 - type: mrr_at_1 value: 60.801 - type: mrr_at_10 value: 69.111 - type: mrr_at_100 value: 69.6 - type: mrr_at_1000 value: 69.611 - type: mrr_at_3 value: 67.229 - type: mrr_at_5 value: 68.214 - type: ndcg_at_1 value: 60.801 - type: ndcg_at_10 value: 73.128 - type: ndcg_at_100 value: 75.614 - type: ndcg_at_1000 value: 75.92 - type: ndcg_at_3 value: 69.261 - type: ndcg_at_5 value: 70.973 - type: precision_at_1 value: 60.801 - type: precision_at_10 value: 8.662 - type: precision_at_100 value: 0.9860000000000001 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 25.149 - type: precision_at_5 value: 15.953999999999999 - type: recall_at_1 value: 60.643 - type: recall_at_10 value: 85.959 - type: recall_at_100 value: 97.576 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 75.184 - type: recall_at_5 value: 79.32000000000001 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 10.183 - type: map_at_10 value: 23.958 - type: map_at_100 value: 34.354 - type: map_at_1000 value: 36.442 - type: map_at_3 value: 16.345000000000002 - type: map_at_5 value: 19.647000000000002 - type: mrr_at_1 value: 74.25 - type: mrr_at_10 value: 80.976 - type: mrr_at_100 value: 81.256 - type: mrr_at_1000 value: 81.262 - type: mrr_at_3 value: 79.958 - type: mrr_at_5 value: 80.37100000000001 - type: ndcg_at_1 value: 62.0 - type: ndcg_at_10 value: 48.894999999999996 - type: ndcg_at_100 value: 53.867 - type: ndcg_at_1000 value: 61.304 - type: ndcg_at_3 value: 53.688 - type: ndcg_at_5 value: 50.900999999999996 - type: precision_at_1 value: 74.25 - type: precision_at_10 value: 39.525 - type: precision_at_100 value: 12.323 - type: precision_at_1000 value: 2.539 - type: precision_at_3 value: 57.49999999999999 - type: precision_at_5 value: 49.1 - type: recall_at_1 value: 10.183 - type: recall_at_10 value: 29.296 - type: recall_at_100 value: 60.394999999999996 - type: recall_at_1000 value: 83.12 - type: recall_at_3 value: 17.495 - type: recall_at_5 value: 22.235 - task: type: Retrieval dataset: type: C-MTEB/DuRetrieval name: MTEB DuRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 26.613999999999997 - type: map_at_10 value: 79.77300000000001 - type: map_at_100 value: 82.71 - type: map_at_1000 value: 82.75 - type: map_at_3 value: 55.92700000000001 - type: map_at_5 value: 70.085 - type: 
mrr_at_1 value: 90.7 - type: mrr_at_10 value: 93.438 - type: mrr_at_100 value: 93.504 - type: mrr_at_1000 value: 93.50699999999999 - type: mrr_at_3 value: 93.125 - type: mrr_at_5 value: 93.34 - type: ndcg_at_1 value: 90.7 - type: ndcg_at_10 value: 87.023 - type: ndcg_at_100 value: 90.068 - type: ndcg_at_1000 value: 90.43299999999999 - type: ndcg_at_3 value: 86.339 - type: ndcg_at_5 value: 85.013 - type: precision_at_1 value: 90.7 - type: precision_at_10 value: 41.339999999999996 - type: precision_at_100 value: 4.806 - type: precision_at_1000 value: 0.48900000000000005 - type: precision_at_3 value: 76.983 - type: precision_at_5 value: 64.69 - type: recall_at_1 value: 26.613999999999997 - type: recall_at_10 value: 87.681 - type: recall_at_100 value: 97.44699999999999 - type: recall_at_1000 value: 99.348 - type: recall_at_3 value: 57.809999999999995 - type: recall_at_5 value: 74.258 - task: type: Retrieval dataset: type: C-MTEB/EcomRetrieval name: MTEB EcomRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 30.9 - type: map_at_10 value: 40.467 - type: map_at_100 value: 41.423 - type: map_at_1000 value: 41.463 - type: map_at_3 value: 37.25 - type: map_at_5 value: 39.31 - type: mrr_at_1 value: 30.9 - type: mrr_at_10 value: 40.467 - type: mrr_at_100 value: 41.423 - type: mrr_at_1000 value: 41.463 - type: mrr_at_3 value: 37.25 - type: mrr_at_5 value: 39.31 - type: ndcg_at_1 value: 30.9 - type: ndcg_at_10 value: 45.957 - type: ndcg_at_100 value: 50.735 - type: ndcg_at_1000 value: 51.861999999999995 - type: ndcg_at_3 value: 39.437 - type: ndcg_at_5 value: 43.146 - type: precision_at_1 value: 30.9 - type: precision_at_10 value: 6.35 - type: precision_at_100 value: 0.861 - type: precision_at_1000 value: 0.095 - type: precision_at_3 value: 15.267 - type: precision_at_5 value: 10.96 - type: recall_at_1 value: 30.9 - type: recall_at_10 value: 63.5 - type: recall_at_100 value: 86.1 - type: recall_at_1000 value: 95.1 - type: recall_at_3 value: 45.800000000000004 - type: recall_at_5 value: 54.800000000000004 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 49.765 - type: f1 value: 45.93242203574485 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 75.138 - type: map_at_10 value: 84.21300000000001 - type: map_at_100 value: 84.43 - type: map_at_1000 value: 84.441 - type: map_at_3 value: 83.071 - type: map_at_5 value: 83.853 - type: mrr_at_1 value: 80.948 - type: mrr_at_10 value: 88.175 - type: mrr_at_100 value: 88.24 - type: mrr_at_1000 value: 88.241 - type: mrr_at_3 value: 87.516 - type: mrr_at_5 value: 87.997 - type: ndcg_at_1 value: 80.948 - type: ndcg_at_10 value: 87.84100000000001 - type: ndcg_at_100 value: 88.576 - type: ndcg_at_1000 value: 88.75699999999999 - type: ndcg_at_3 value: 86.176 - type: ndcg_at_5 value: 87.214 - type: precision_at_1 value: 80.948 - type: precision_at_10 value: 10.632 - type: precision_at_100 value: 1.123 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 33.193 - type: precision_at_5 value: 20.663 - type: recall_at_1 value: 75.138 - type: recall_at_10 value: 94.89699999999999 - type: recall_at_100 value: 97.751 - type: recall_at_1000 value: 98.833 - type: recall_at_3 value: 90.455 - type: recall_at_5 value: 93.085 - task: type: Retrieval dataset: type: fiqa name: MTEB 
FiQA2018 config: default split: test revision: None metrics: - type: map_at_1 value: 29.45 - type: map_at_10 value: 48.596000000000004 - type: map_at_100 value: 50.70400000000001 - type: map_at_1000 value: 50.83800000000001 - type: map_at_3 value: 42.795 - type: map_at_5 value: 46.085 - type: mrr_at_1 value: 56.172999999999995 - type: mrr_at_10 value: 64.35300000000001 - type: mrr_at_100 value: 64.947 - type: mrr_at_1000 value: 64.967 - type: mrr_at_3 value: 62.653999999999996 - type: mrr_at_5 value: 63.534 - type: ndcg_at_1 value: 56.172999999999995 - type: ndcg_at_10 value: 56.593 - type: ndcg_at_100 value: 62.942 - type: ndcg_at_1000 value: 64.801 - type: ndcg_at_3 value: 53.024 - type: ndcg_at_5 value: 53.986999999999995 - type: precision_at_1 value: 56.172999999999995 - type: precision_at_10 value: 15.494 - type: precision_at_100 value: 2.222 - type: precision_at_1000 value: 0.254 - type: precision_at_3 value: 35.185 - type: precision_at_5 value: 25.556 - type: recall_at_1 value: 29.45 - type: recall_at_10 value: 62.882000000000005 - type: recall_at_100 value: 85.56099999999999 - type: recall_at_1000 value: 96.539 - type: recall_at_3 value: 47.911 - type: recall_at_5 value: 54.52 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 39.581 - type: map_at_10 value: 68.401 - type: map_at_100 value: 69.207 - type: map_at_1000 value: 69.25200000000001 - type: map_at_3 value: 64.689 - type: map_at_5 value: 67.158 - type: mrr_at_1 value: 79.163 - type: mrr_at_10 value: 85.22999999999999 - type: mrr_at_100 value: 85.386 - type: mrr_at_1000 value: 85.39099999999999 - type: mrr_at_3 value: 84.432 - type: mrr_at_5 value: 84.952 - type: ndcg_at_1 value: 79.163 - type: ndcg_at_10 value: 75.721 - type: ndcg_at_100 value: 78.411 - type: ndcg_at_1000 value: 79.23599999999999 - type: ndcg_at_3 value: 70.68799999999999 - type: ndcg_at_5 value: 73.694 - type: precision_at_1 value: 79.163 - type: precision_at_10 value: 16.134 - type: precision_at_100 value: 1.821 - type: precision_at_1000 value: 0.193 - type: precision_at_3 value: 46.446 - type: precision_at_5 value: 30.242 - type: recall_at_1 value: 39.581 - type: recall_at_10 value: 80.66799999999999 - type: recall_at_100 value: 91.033 - type: recall_at_1000 value: 96.408 - type: recall_at_3 value: 69.669 - type: recall_at_5 value: 75.604 - task: type: Classification dataset: type: C-MTEB/IFlyTek-classification name: MTEB IFlyTek config: default split: validation revision: None metrics: - type: accuracy value: 45.04809542131589 - type: f1 value: 37.01181779071118 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 94.78120000000001 - type: ap value: 92.52931921594387 - type: f1 value: 94.77902110732532 - task: type: Classification dataset: type: C-MTEB/JDReview-classification name: MTEB JDReview config: default split: test revision: None metrics: - type: accuracy value: 85.81613508442777 - type: ap value: 52.430320593468394 - type: f1 value: 79.95467268178068 - task: type: STS dataset: type: C-MTEB/LCQMC name: MTEB LCQMC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 71.05801751913393 - type: cos_sim_spearman value: 75.47954644971965 - type: euclidean_pearson value: 74.27472296759713 - type: euclidean_spearman value: 75.47954201369866 - type: manhattan_pearson value: 74.30508190186474 - 
type: manhattan_spearman value: 75.51326518159436 - task: type: Reranking dataset: type: C-MTEB/Mmarco-reranking name: MTEB MMarcoReranking config: default split: dev revision: None metrics: - type: map value: 24.21110921666315 - type: mrr value: 22.863492063492064 - task: type: Retrieval dataset: type: C-MTEB/MMarcoRetrieval name: MTEB MMarcoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 61.38400000000001 - type: map_at_10 value: 70.895 - type: map_at_100 value: 71.314 - type: map_at_1000 value: 71.331 - type: map_at_3 value: 69.016 - type: map_at_5 value: 70.179 - type: mrr_at_1 value: 63.481 - type: mrr_at_10 value: 71.543 - type: mrr_at_100 value: 71.91300000000001 - type: mrr_at_1000 value: 71.928 - type: mrr_at_3 value: 69.90899999999999 - type: mrr_at_5 value: 70.907 - type: ndcg_at_1 value: 63.481 - type: ndcg_at_10 value: 74.833 - type: ndcg_at_100 value: 76.705 - type: ndcg_at_1000 value: 77.13600000000001 - type: ndcg_at_3 value: 71.236 - type: ndcg_at_5 value: 73.199 - type: precision_at_1 value: 63.481 - type: precision_at_10 value: 9.179 - type: precision_at_100 value: 1.011 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 27.044 - type: precision_at_5 value: 17.272000000000002 - type: recall_at_1 value: 61.38400000000001 - type: recall_at_10 value: 86.318 - type: recall_at_100 value: 94.786 - type: recall_at_1000 value: 98.14500000000001 - type: recall_at_3 value: 76.717 - type: recall_at_5 value: 81.416 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 23.363999999999997 - type: map_at_10 value: 36.022 - type: map_at_100 value: 37.229 - type: map_at_1000 value: 37.274 - type: map_at_3 value: 32.131 - type: map_at_5 value: 34.391 - type: mrr_at_1 value: 24.069 - type: mrr_at_10 value: 36.620000000000005 - type: mrr_at_100 value: 37.769999999999996 - type: mrr_at_1000 value: 37.809 - type: mrr_at_3 value: 32.846 - type: mrr_at_5 value: 35.02 - type: ndcg_at_1 value: 24.069 - type: ndcg_at_10 value: 43.056 - type: ndcg_at_100 value: 48.754 - type: ndcg_at_1000 value: 49.829 - type: ndcg_at_3 value: 35.167 - type: ndcg_at_5 value: 39.168 - type: precision_at_1 value: 24.069 - type: precision_at_10 value: 6.762 - type: precision_at_100 value: 0.96 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 14.957 - type: precision_at_5 value: 11.023 - type: recall_at_1 value: 23.363999999999997 - type: recall_at_10 value: 64.696 - type: recall_at_100 value: 90.795 - type: recall_at_1000 value: 98.892 - type: recall_at_3 value: 43.247 - type: recall_at_5 value: 52.86300000000001 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 96.11947104423166 - type: f1 value: 95.89561841159332 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (de) config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 92.97548605240912 - type: f1 value: 92.17133696717212 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (es) config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.37224816544364 - type: f1 value: 93.19978829237863 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB 
MTOPDomainClassification (fr) config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 91.28719072972127 - type: f1 value: 91.28448045979604 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (hi) config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 88.8131946934385 - type: f1 value: 88.27883019362747 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (th) config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 85.52260397830018 - type: f1 value: 85.15528226728568 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 86.10807113543093 - type: f1 value: 70.88498219072167 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (de) config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 77.77120315581854 - type: f1 value: 57.97153920153224 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (es) config: es split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 79.93995997331554 - type: f1 value: 58.839203810064866 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (fr) config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 77.801440651425 - type: f1 value: 58.68009647839332 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (hi) config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 72.90785227680172 - type: f1 value: 49.83760954655788 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (th) config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 73.24050632911391 - type: f1 value: 52.0562553541082 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (af) config: af split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.47948890383321 - type: f1 value: 63.334877563135485 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (am) config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 44.2871553463349 - type: f1 value: 43.17658050605427 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ar) config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.174176193678555 - type: f1 value: 59.236659587042425 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (az) config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.226630800269 - type: f1 value: 60.951842696956184 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (bn) config: bn split: test revision: 
31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.94283792871555 - type: f1 value: 61.40057652844215 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (cy) config: cy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.480833893745796 - type: f1 value: 52.5298332072816 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (da) config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.52858103564223 - type: f1 value: 69.3770851919204 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (de) config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.09213180901143 - type: f1 value: 71.13518469365879 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (el) config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.31203765971756 - type: f1 value: 66.05906970865144 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 80.57162071284465 - type: f1 value: 77.7866172598823 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (es) config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 75.09414929388029 - type: f1 value: 72.5712594833695 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fa) config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.20914593140553 - type: f1 value: 68.90619124909186 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fi) config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.74243443174176 - type: f1 value: 64.72743141749955 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fr) config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 75.11096166778749 - type: f1 value: 72.61849933064694 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (he) config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.22394082044384 - type: f1 value: 62.43648797607235 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hi) config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.44855413584399 - type: f1 value: 66.56851670913659 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hu) config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.4149293880296 - type: f1 value: 66.12960877904776 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hy) 
config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.916610625420304 - type: f1 value: 54.02534600927991 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (id) config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.71351714862138 - type: f1 value: 69.70227985126316 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (is) config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.91257565568257 - type: f1 value: 57.06811572144974 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (it) config: it split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 75.25218560860793 - type: f1 value: 72.48057563104247 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ja) config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 76.35507733691998 - type: f1 value: 73.03024649541128 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (jv) config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.918628110289184 - type: f1 value: 54.75590124456177 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ka) config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 52.548755884330866 - type: f1 value: 51.5356975360209 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (km) config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 46.44922663080027 - type: f1 value: 44.561114416830975 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (kn) config: kn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 53.95763281775386 - type: f1 value: 50.68367245122476 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ko) config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.20645595158035 - type: f1 value: 71.78450093258185 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (lv) config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.226630800269 - type: f1 value: 57.53988988993337 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ml) config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.44922663080027 - type: f1 value: 48.58809018065056 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (mn) config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.3752521856086 - type: f1 value: 49.91373941436425 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB 
MassiveIntentClassification (ms)
    config: ms
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 69.85205110961668
  - type: f1
    value: 67.05660019588582
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (my)
    config: my
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 49.1492938802959
  - type: f1
    value: 46.717578025393195
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (nb)
    config: nb
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 70.93140551445865
  - type: f1
    value: 67.45406609372205
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (nl)
    config: nl
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 74.82851378614662
  - type: f1
    value: 71.15951964393868
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (pl)
    config: pl
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 74.84868863483524
  - type: f1
    value: 71.76056802364877
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (pt)
    config: pt
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 75.27236045729657
  - type: f1
    value: 72.48733090101163
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (ro)
    config: ro
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 69.63012777404168
  - type: f1
    value: 66.56444015346203
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (ru)
    config: ru
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 76.62743779421655
  - type: f1
    value: 73.82720656992142
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (sl)
    config: sl
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 67.15198386012105
  - type: f1
    value: 64.41418309797744
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (sq)
    config: sq
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 58.8399462004035
  - type: f1
    value: 56.050989519693886
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (sv)
    config: sv
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 73.86684599865501
  - type: f1
    value: 70.80682480844303
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (sw)
    config: sw
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 57.36718224613316
  - type: f1
    value: 54.998746471013774
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (ta)
    config: ta
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 53.150638870208475
  - type: f1
    value: 49.79179342620099
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (te)
    config: te
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 51.50638870208473
  - type: f1
    value: 49.778960742003555
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (th)
    config: th
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 66.906523201076
  - type: f1
    value: 66.75784022138245
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (tl)
    config: tl
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 68.73234700739744
  - type: f1
    value: 65.75016141148413
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (tr)
    config: tr
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 72.06792199058508
  - type: f1
    value: 67.90334782594083
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (ur)
    config: ur
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 62.09145931405515
  - type: f1
    value: 58.88703095210731
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (vi)
    config: vi
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 71.17014122394083
  - type: f1
    value: 68.43676277921544
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (zh-CN)
    config: zh-CN
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 74.99327505043712
  - type: f1
    value: 72.26813373392943
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_intent
    name: MTEB MassiveIntentClassification (zh-TW)
    config: zh-TW
    split: test
    revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7
  metrics:
  - type: accuracy
    value: 71.13987895090787
  - type: f1
    value: 70.29309514467575
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (af)
    config: af
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 73.37256220578345
  - type: f1
    value: 72.56456170538992
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (am)
    config: am
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 47.205783456624076
  - type: f1
    value: 45.905999859074434
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ar)
    config: ar
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 69.8352387357095
  - type: f1
    value: 69.43553987525273
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (az)
    config: az
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 67.00403496973773
  - type: f1
    value: 65.97477215779143
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (bn)
    config: bn
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 68.04976462676531
  - type: f1
    value: 67.24581993778398
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (cy)
    config: cy
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 61.882985877605925
  - type: f1
    value: 59.995293199988794
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (da)
    config: da
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 76.75857431069267
  - type: f1
    value: 76.52031675299841
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (de)
    config: de
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 79.03496973772697
  - type: f1
    value: 79.25548063175344
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (el)
    config: el
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 72.96570275722931
  - type: f1
    value: 72.19110435289122
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (en)
    config: en
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 82.38735709482178
  - type: f1
    value: 82.34495627619785
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (es)
    config: es
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 78.83994620040352
  - type: f1
    value: 78.91526355393667
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (fa)
    config: fa
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 76.7350369872226
  - type: f1
    value: 75.919437344927
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (fi)
    config: fi
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 71.21721587088096
  - type: f1
    value: 70.82973286243262
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (fr)
    config: fr
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 78.59784801613988
  - type: f1
    value: 78.47383161087423
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (he)
    config: he
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 69.64021519838602
  - type: f1
    value: 68.45118053027653
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (hi)
    config: hi
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 73.51042367182245
  - type: f1
    value: 72.90013022879003
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (hu)
    config: hu
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 74.0551445864156
  - type: f1
    value: 73.45871761713292
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (hy)
    config: hy
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 59.54606590450571
  - type: f1
    value: 57.72711794953869
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (id)
    config: id
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 77.40753194351042
  - type: f1
    value: 76.8157455506521
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (is)
    config: is
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 66.58372562205783
  - type: f1
    value: 65.2654868709758
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (it)
    config: it
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 78.39273705447208
  - type: f1
    value: 78.3592956594837
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ja)
    config: ja
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 79.62004034969739
  - type: f1
    value: 79.78673754501855
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (jv)
    config: jv
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 64.29051782111634
  - type: f1
    value: 63.12502587609454
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ka)
    config: ka
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 57.51849361129791
  - type: f1
    value: 56.32320906403241
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (km)
    config: km
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 52.41761936785474
  - type: f1
    value: 49.113762010098306
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (kn)
    config: kn
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 58.547410894418284
  - type: f1
    value: 56.87580674198118
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ko)
    config: ko
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 78.89038332212507
  - type: f1
    value: 79.09210140529848
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (lv)
    config: lv
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 63.503698722259585
  - type: f1
    value: 61.45718858568352
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ml)
    config: ml
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 54.02824478816408
  - type: f1
    value: 52.732738981386504
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (mn)
    config: mn
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 54.23671822461331
  - type: f1
    value: 52.688080372545286
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ms)
    config: ms
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 75.5312710154674
  - type: f1
    value: 74.59368478550698
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (my)
    config: my
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 52.192333557498316
  - type: f1
    value: 50.18302290152229
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (nb)
    config: nb
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 75.6960322797579
  - type: f1
    value: 75.25331182714856
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (nl)
    config: nl
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 78.47679892400808
  - type: f1
    value: 78.24044732352424
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (pl)
    config: pl
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 77.36718224613315
  - type: f1
    value: 77.2714452985389
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (pt)
    config: pt
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 77.96234028244788
  - type: f1
    value: 78.21282127011372
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ro)
    config: ro
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 73.19435104236717
  - type: f1
    value: 73.1963711292812
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ru)
    config: ru
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 80.52118359112306
  - type: f1
    value: 80.4179964390288
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (sl)
    config: sl
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 73.65837256220577
  - type: f1
    value: 73.07156989634905
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (sq)
    config: sq
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 64.02824478816409
  - type: f1
    value: 62.972399027713664
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (sv)
    config: sv
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 78.87020847343645
  - type: f1
    value: 78.224240866849
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (sw)
    config: sw
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 64.6570275722932
  - type: f1
    value: 63.274871811412545
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ta)
    config: ta
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 57.760591795561524
  - type: f1
    value: 56.73711528075771
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (te)
    config: te
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 57.26967047747142
  - type: f1
    value: 55.74735330863165
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (th)
    config: th
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 72.46133154001345
  - type: f1
    value: 71.9644168952811
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (tl)
    config: tl
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 73.70880968392737
  - type: f1
    value: 73.61543141070884
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (tr)
    config: tr
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 75.0437121721587
  - type: f1
    value: 74.83359868879921
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (ur)
    config: ur
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 67.05110961667788
  - type: f1
    value: 66.25869819274315
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (vi)
    config: vi
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 75.52118359112306
  - type: f1
    value: 75.92098546052303
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (zh-CN)
    config: zh-CN
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 79.92938802958977
  - type: f1
    value: 79.79833572573796
- task:
    type: Classification
  dataset:
    type: mteb/amazon_massive_scenario
    name: MTEB MassiveScenarioClassification (zh-TW)
    config: zh-TW
    split: test
    revision: 7d571f92784cd94a019292a1f45445077d0ef634
  metrics:
  - type: accuracy
    value: 76.86617350369872
  - type: f1
    value: 77.42645654909516
- task:
    type: Retrieval
  dataset:
    type: C-MTEB/MedicalRetrieval
    name: MTEB MedicalRetrieval
    config: default
    split: dev
    revision: None
  metrics:
  - type: map_at_1
    value: 44.6
  - type: map_at_10
    value: 50.019000000000005
  - type: map_at_100
    value: 50.611
  - type: map_at_1000
    value: 50.67
  - type: map_at_3
    value: 48.699999999999996
  - type: map_at_5
    value: 49.455
  - type: mrr_at_1
    value: 44.800000000000004
  - type: mrr_at_10
    value: 50.119
  - type: mrr_at_100
    value: 50.711
  - type: mrr_at_1000
    value: 50.77
  - type: mrr_at_3
    value: 48.8
  - type: mrr_at_5
    value: 49.555
  - type: ndcg_at_1
    value: 44.6
  - type: ndcg_at_10
    value: 52.754
  - type: ndcg_at_100
    value: 55.935
  - type: ndcg_at_1000
    value: 57.607
  - type: ndcg_at_3
    value: 50.012
  - type: ndcg_at_5
    value: 51.393
  - type: precision_at_1
    value: 44.6
  - type: precision_at_10
    value: 6.140000000000001
  - type: precision_at_100
    value: 0.77
  - type: precision_at_1000
    value: 0.09
  - type: precision_at_3
    value: 17.933
  - type: precision_at_5
    value: 11.44
  - type: recall_at_1
    value: 44.6
  - type: recall_at_10
    value: 61.4
  - type: recall_at_100
    value: 77.0
  - type: recall_at_1000
    value: 90.4
  - type: recall_at_3
    value: 53.800000000000004
  - type: recall_at_5
    value: 57.199999999999996
- task:
    type: Clustering
  dataset:
    type: mteb/medrxiv-clustering-p2p
    name: MTEB MedrxivClusteringP2P
    config: default
    split: test
    revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73
  metrics:
  - type: v_measure
    value: 38.192667527616315
- task:
    type: Clustering
  dataset:
    type: mteb/medrxiv-clustering-s2s
    name: MTEB MedrxivClusteringS2S
    config: default
    split: test
    revision: 35191c8c0dca72d8ff3efcd72aa802307d469663
  metrics:
  - type: v_measure
    value: 37.44738902946689
- task:
    type: Reranking
  dataset:
    type: mteb/mind_small
    name: MTEB MindSmallReranking
    config: default
    split: test
    revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69
  metrics:
  - type: map
    value: 32.59661273103955
  - type: mrr
    value: 33.82024242497473
- task:
    type: Classification
  dataset:
    type: C-MTEB/MultilingualSentiment-classification
    name: MTEB MultilingualSentiment
    config: default
    split: validation
    revision: None
  metrics:
  - type: accuracy
    value: 73.31333333333335
  - type: f1
    value: 73.0873466527602
- task:
    type: Retrieval
  dataset:
    type: nfcorpus
    name: MTEB NFCorpus
    config: default
    split: test
    revision: None
  metrics:
  - type: map_at_1
    value: 5.471
  - type: map_at_10
    value: 14.142
  - type: map_at_100
    value: 18.179000000000002
  - type: map_at_1000
    value: 19.772000000000002
  - type: map_at_3
    value: 9.716
  - type: map_at_5
    value: 11.763
  - type: mrr_at_1
    value: 51.393
  - type: mrr_at_10
    value: 58.814
  - type: mrr_at_100
    value: 59.330000000000005
  - type: mrr_at_1000
    value: 59.35
  - type: mrr_at_3
    value: 56.398
  - type: mrr_at_5
    value: 58.038999999999994
  - type: ndcg_at_1
    value: 49.69
  - type: ndcg_at_10
    value: 38.615
  - type: ndcg_at_100
    value: 35.268
  - type: ndcg_at_1000
    value: 43.745
  - type: ndcg_at_3
    value: 43.187
  - type: ndcg_at_5
    value: 41.528999999999996
  - type: precision_at_1
    value: 51.083999999999996
  - type: precision_at_10
    value: 29.474
  - type: precision_at_100
    value: 9.167
  - type: precision_at_1000
    value: 2.2089999999999996
  - type: precision_at_3
    value: 40.351
  - type: precision_at_5
    value: 36.285000000000004
  - type: recall_at_1
    value: 5.471
  - type: recall_at_10
    value: 19.242
  - type: recall_at_100
    value: 37.14
  - type: recall_at_1000
    value: 68.35900000000001
  - type: recall_at_3
    value: 10.896
  - type: recall_at_5
    value: 14.75
- task:
    type: Retrieval
  dataset:
    type: nq
    name: MTEB NQ
    config: default
    split: test
    revision: None
  metrics:
  - type: map_at_1
    value: 39.499
  - type: map_at_10
    value: 55.862
  - type: map_at_100
    value: 56.667
  - type: map_at_1000
    value: 56.684999999999995
  - type: map_at_3
    value: 51.534
  - type: map_at_5
    value: 54.2
  - type: mrr_at_1
    value: 44.351
  - type: mrr_at_10
    value: 58.567
  - type: mrr_at_100
    value: 59.099000000000004
  - type: mrr_at_1000
    value: 59.109
  - type: mrr_at_3
    value: 55.218999999999994
  - type: mrr_at_5
    value: 57.391999999999996
  - type: ndcg_at_1
    value: 44.322
  - type: ndcg_at_10
    value: 63.535
  - type: ndcg_at_100
    value: 66.654
  - type: ndcg_at_1000
    value: 66.991
  - type: ndcg_at_3
    value: 55.701
  - type: ndcg_at_5
    value: 60.06700000000001
  - type: precision_at_1
    value: 44.322
  - type: precision_at_10
    value: 10.026
  - type: precision_at_100
    value: 1.18
  - type: precision_at_1000
    value: 0.121
  - type: precision_at_3
    value: 24.865000000000002
  - type: precision_at_5
    value: 17.48
  - type: recall_at_1
    value: 39.499
  - type: recall_at_10
    value: 84.053
  - type: recall_at_100
    value: 97.11
  - type: recall_at_1000
    value: 99.493
  - type: recall_at_3
    value: 64.091
  - type: recall_at_5
    value: 74.063
- task:
    type: PairClassification
  dataset:
    type: C-MTEB/OCNLI
    name: MTEB Ocnli
    config: default
    split: validation
    revision: None
  metrics:
  - type: cos_sim_accuracy
    value: 61.18029236599891
  - type: cos_sim_ap
    value: 64.18398769398412
  - type: cos_sim_f1
    value: 67.96347757046446
  - type: cos_sim_precision
    value: 54.4529262086514
  - type: cos_sim_recall
    value: 90.3907074973601
  - type: dot_accuracy
    value: 61.18029236599891
  - type: dot_ap
    value: 64.18393484706077
  - type: dot_f1
    value: 67.96347757046446
  - type: dot_precision
    value: 54.4529262086514
  - type: dot_recall
    value: 90.3907074973601
  - type: euclidean_accuracy
    value: 61.18029236599891
  - type: euclidean_ap
    value: 64.18395024821486
  - type: euclidean_f1
    value: 67.96347757046446
  - type: euclidean_precision
    value: 54.4529262086514
  - type: euclidean_recall
    value: 90.3907074973601
  - type: manhattan_accuracy
    value: 61.451001624255554
  - type: manhattan_ap
    value: 64.38232708763513
  - type: manhattan_f1
    value: 68.05860805860804
  - type: manhattan_precision
    value: 52.10319685922602
  - type: manhattan_recall
    value: 98.09926082365365
  - type: max_accuracy
    value: 61.451001624255554
  - type: max_ap
    value: 64.38232708763513
  - type: max_f1
    value: 68.05860805860804
- task:
    type: Classification
  dataset:
    type: C-MTEB/OnlineShopping-classification
    name: MTEB OnlineShopping
    config: default
    split: test
    revision: None
  metrics:
  - type: accuracy
    value: 92.19000000000001
  - type: ap
    value: 89.73918431886767
  - type: f1
    value: 92.17175032574507
- task:
    type: STS
  dataset:
    type: C-MTEB/PAWSX
    name: MTEB PAWSX
    config: default
    split: test
    revision: None
  metrics:
  - type: cos_sim_pearson
    value: 15.079320253752224
  - type: cos_sim_spearman
    value: 16.813772504404263
  - type: euclidean_pearson
    value: 19.476541162041762
  - type: euclidean_spearman
    value: 16.813772498098782
  - type: manhattan_pearson
    value: 19.497429832915277
  - type: manhattan_spearman
    value: 16.869600674180607
- task:
    type: STS
  dataset:
    type: C-MTEB/QBQTC
    name: MTEB QBQTC
    config: default
    split: test
    revision: None
  metrics:
  - type: cos_sim_pearson
    value: 30.36139599797913
  - type: cos_sim_spearman
    value: 31.80296402851347
  - type: euclidean_pearson
    value: 30.10387888252793
  - type: euclidean_spearman
    value: 31.80297780103808
  - type: manhattan_pearson
    value: 30.86720382849436
  - type: manhattan_spearman
    value: 32.70491131366606
- task:
    type: Retrieval
  dataset:
    type: quora
    name: MTEB QuoraRetrieval
    config: default
    split: test
    revision: None
  metrics:
  - type: map_at_1
    value: 71.911
  - type: map_at_10
    value: 86.087
  - type: map_at_100
    value: 86.701
  - type: map_at_1000
    value: 86.715
  - type: map_at_3
    value: 83.231
  - type: map_at_5
    value: 85.051
  - type: mrr_at_1
    value: 82.75
  - type: mrr_at_10
    value: 88.759
  - type: mrr_at_100
    value: 88.844
  - type: mrr_at_1000
    value: 88.844
  - type: mrr_at_3
    value: 87.935
  - type: mrr_at_5
    value: 88.504
  - type: ndcg_at_1
    value: 82.75
  - type: ndcg_at_10
    value: 89.605
  - type: ndcg_at_100
    value: 90.664
  - type: ndcg_at_1000
    value: 90.733
  - type: ndcg_at_3
    value: 87.03
  - type: ndcg_at_5
    value: 88.473
  - type: precision_at_1
    value: 82.75
  - type: precision_at_10
    value: 13.575000000000001
  - type: precision_at_100
    value: 1.539
  - type: precision_at_1000
    value: 0.157
  - type: precision_at_3
    value: 38.153
  - type: precision_at_5
    value: 25.008000000000003
  - type: recall_at_1
    value: 71.911
  - type: recall_at_10
    value: 96.261
  - type: recall_at_100
    value: 99.72800000000001
  - type: recall_at_1000
    value: 99.993
  - type: recall_at_3
    value: 88.762
  - type: recall_at_5
    value: 92.949
- task:
    type: Clustering
  dataset:
    type: mteb/reddit-clustering
    name: MTEB RedditClustering
    config: default
    split: test
    revision: 24640382cdbf8abc73003fb0fa6d111a705499eb
  metrics:
  - type: v_measure
    value: 57.711581165572376
- task:
    type: Clustering
  dataset:
    type: mteb/reddit-clustering-p2p
    name: MTEB RedditClusteringP2P
    config: default
    split: test
    revision: 282350215ef01743dc01b456c7f5241fa8937f16
  metrics:
  - type: v_measure
    value: 66.48938885750297
- task:
    type: Retrieval
  dataset:
    type: scidocs
    name: MTEB SCIDOCS
    config: default
    split: test
    revision: None
  metrics:
  - type: map_at_1
    value: 3.7379999999999995
  - type: map_at_10
    value: 9.261
  - type: map_at_100
    value: 11.001
  - type: map_at_1000
    value: 11.262
  - type: map_at_3
    value: 6.816
  - type: map_at_5
    value: 8.0
  - type: mrr_at_1
    value: 18.4
  - type: mrr_at_10
    value: 28.755999999999997
  - type: mrr_at_100
    value: 29.892000000000003
  - type: mrr_at_1000
    value: 29.961
  - type: mrr_at_3
    value: 25.467000000000002
  - type: mrr_at_5
    value: 27.332
  - type: ndcg_at_1
    value: 18.4
  - type: ndcg_at_10
    value: 16.296
  - type: ndcg_at_100
    value: 23.52
  - type: ndcg_at_1000
    value: 28.504
  - type: ndcg_at_3
    value: 15.485
  - type: ndcg_at_5
    value: 13.471
  - type: precision_at_1
    value: 18.4
  - type: precision_at_10
    value: 8.469999999999999
  - type: precision_at_100
    value: 1.8950000000000002
  - type: precision_at_1000
    value: 0.309
  - type: precision_at_3
    value: 14.6
  - type: precision_at_5
    value: 11.84
  - type: recall_at_1
    value: 3.7379999999999995
  - type: recall_at_10
    value: 17.185
  - type: recall_at_100
    value: 38.397
  - type: recall_at_1000
    value: 62.798
  - type: recall_at_3
    value: 8.896999999999998
  - type: recall_at_5
    value: 12.021999999999998
- task:
    type: STS
  dataset:
    type: mteb/sickr-sts
    name: MTEB SICK-R
    config: default
    split: test
    revision: a6ea5a8cab320b040a23452cc28066d9beae2cee
  metrics:
  - type: cos_sim_pearson
    value: 86.43977757480083
  - type: cos_sim_spearman
    value: 82.64182475199533
  - type: euclidean_pearson
    value: 83.71756009999591
  - type: euclidean_spearman
    value: 82.64182331395057
  - type: manhattan_pearson
    value: 83.8028936913025
  - type: manhattan_spearman
    value: 82.71024597804252
- task:
    type: STS
  dataset:
    type: mteb/sts12-sts
    name: MTEB STS12
    config: default
    split: test
    revision: a0d554a64d88156834ff5ae9920b964011b16384
  metrics:
  - type: cos_sim_pearson
    value: 86.85653060698912
  - type: cos_sim_spearman
    value: 79.65598885228324
  - type: euclidean_pearson
    value: 83.1205137628455
  - type: euclidean_spearman
    value: 79.65629387709038
  - type: manhattan_pearson
    value: 83.71108853545837
  - type: manhattan_spearman
    value: 80.25617619716708
- task:
    type: STS
  dataset:
    type: mteb/sts13-sts
    name: MTEB STS13
    config: default
    split: test
    revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca
  metrics:
  - type: cos_sim_pearson
    value: 88.22921688565664
  - type: cos_sim_spearman
    value: 88.42662103041957
  - type: euclidean_pearson
    value: 87.91679798473325
  - type: euclidean_spearman
    value: 88.42662103041957
  - type: manhattan_pearson
    value: 88.16927537961303
  - type: manhattan_spearman
    value: 88.81581680062541
- task:
    type: STS
  dataset:
    type: mteb/sts14-sts
    name: MTEB STS14
    config: default
    split: test
    revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375
  metrics:
  - type: cos_sim_pearson
    value: 86.77261424554293
  - type: cos_sim_spearman
    value: 84.53930146434155
  - type: euclidean_pearson
    value: 85.67420491389697
  - type: euclidean_spearman
    value: 84.53929771783851
  - type: manhattan_pearson
    value: 85.74306784515618
  - type: manhattan_spearman
    value: 84.7399304675314
- task:
    type: STS
  dataset:
    type: mteb/sts15-sts
    name: MTEB STS15
    config: default
    split: test
    revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3
  metrics:
  - type: cos_sim_pearson
    value: 89.86138395166455
  - type: cos_sim_spearman
    value: 90.42577823022054
  - type: euclidean_pearson
    value: 89.8787763797515
  - type: euclidean_spearman
    value: 90.42577823022054
  - type: manhattan_pearson
    value: 89.9592937492158
  - type: manhattan_spearman
    value: 90.63535505335524
- task:
    type: STS
  dataset:
    type: mteb/sts16-sts
    name: MTEB STS16
    config: default
    split: test
    revision: 4d8694f8f0e0100860b497b999b3dbed754a0513
  metrics:
  - type: cos_sim_pearson
    value: 86.5176674585941
  - type: cos_sim_spearman
    value: 87.6842917085397
  - type: euclidean_pearson
    value: 86.70213081520711
  - type: euclidean_spearman
    value: 87.6842917085397
  - type: manhattan_pearson
    value: 86.83702628983627
  - type: manhattan_spearman
    value: 87.87791000374443
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (ko-ko)
    config: ko-ko
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 83.86395454805867
  - type: cos_sim_spearman
    value: 83.69454595252267
  - type: euclidean_pearson
    value: 83.04743892608313
  - type: euclidean_spearman
    value: 83.69454026433006
  - type: manhattan_pearson
    value: 83.4032095553322
  - type: manhattan_spearman
    value: 84.11527379013802
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (ar-ar)
    config: ar-ar
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 81.80249894729546
  - type: cos_sim_spearman
    value: 81.87004960533409
  - type: euclidean_pearson
    value: 80.0392760044179
  - type: euclidean_spearman
    value: 81.87004960533409
  - type: manhattan_pearson
    value: 80.38096542355912
  - type: manhattan_spearman
    value: 82.40774679630341
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (en-ar)
    config: en-ar
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 77.6158201787172
  - type: cos_sim_spearman
    value: 77.934651044009
  - type: euclidean_pearson
    value: 77.7874683895269
  - type: euclidean_spearman
    value: 77.934651044009
  - type: manhattan_pearson
    value: 78.36151849193052
  - type: manhattan_spearman
    value: 78.52439586349938
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (en-de)
    config: en-de
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 87.04363311392207
  - type: cos_sim_spearman
    value: 87.30483659369973
  - type: euclidean_pearson
    value: 87.62634489502616
  - type: euclidean_spearman
    value: 87.30483659369973
  - type: manhattan_pearson
    value: 88.02340837141445
  - type: manhattan_spearman
    value: 87.55012003294
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (en-en)
    config: en-en
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 91.69172851958248
  - type: cos_sim_spearman
    value: 91.7546879482416
  - type: euclidean_pearson
    value: 91.84843039183963
  - type: euclidean_spearman
    value: 91.7546879482416
  - type: manhattan_pearson
    value: 91.72325753804357
  - type: manhattan_spearman
    value: 91.55330259513397
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (en-tr)
    config: en-tr
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 73.95572901084864
  - type: cos_sim_spearman
    value: 72.56217821552626
  - type: euclidean_pearson
    value: 74.24242980323574
  - type: euclidean_spearman
    value: 72.56217821552626
  - type: manhattan_pearson
    value: 74.57473362519922
  - type: manhattan_spearman
    value: 72.76048826648497
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (es-en)
    config: es-en
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 86.93329396008296
  - type: cos_sim_spearman
    value: 88.2406635486219
  - type: euclidean_pearson
    value: 87.49687343908533
  - type: euclidean_spearman
    value: 88.2406635486219
  - type: manhattan_pearson
    value: 88.14088309231084
  - type: manhattan_spearman
    value: 88.93314020908534
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (es-es)
    config: es-es
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 88.70124451546057
  - type: cos_sim_spearman
    value: 87.45988160052252
  - type: euclidean_pearson
    value: 88.44395505247728
  - type: euclidean_spearman
    value: 87.45988160052252
  - type: manhattan_pearson
    value: 88.69269783495425
  - type: manhattan_spearman
    value: 87.65383425621
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (fr-en)
    config: fr-en
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 87.64109149761346
  - type: cos_sim_spearman
    value: 88.06459637689733
  - type: euclidean_pearson
    value: 88.02313315797703
  - type: euclidean_spearman
    value: 88.06459637689733
  - type: manhattan_pearson
    value: 88.28328539133253
  - type: manhattan_spearman
    value: 88.06605708379142
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (it-en)
    config: it-en
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 88.9040028177525
  - type: cos_sim_spearman
    value: 89.68152202933464
  - type: euclidean_pearson
    value: 89.23684469601253
  - type: euclidean_spearman
    value: 89.68152202933464
  - type: manhattan_pearson
    value: 89.59504307277454
  - type: manhattan_spearman
    value: 89.88060100313582
- task:
    type: STS
  dataset:
    type: mteb/sts17-crosslingual-sts
    name: MTEB STS17 (nl-en)
    config: nl-en
    split: test
    revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d
  metrics:
  - type: cos_sim_pearson
    value: 87.69891585325125
  - type: cos_sim_spearman
    value: 88.25252785071736
  - type: euclidean_pearson
    value: 87.99932873748662
  - type: euclidean_spearman
    value: 88.25252785071736
  - type: manhattan_pearson
    value: 88.26959683009446
  - type: manhattan_spearman
    value: 88.32583227300715
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (en)
    config: en
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 67.53235909794135
  - type: cos_sim_spearman
    value: 66.97521740529574
  - type: euclidean_pearson
    value: 68.19502223613912
  - type: euclidean_spearman
    value: 66.97521740529574
  - type: manhattan_pearson
    value: 68.39070714774539
  - type: manhattan_spearman
    value: 67.1072812364868
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (de)
    config: de
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 43.715742021204775
  - type: cos_sim_spearman
    value: 49.12255971271453
  - type: euclidean_pearson
    value: 40.76848562610837
  - type: euclidean_spearman
    value: 49.12255971271453
  - type: manhattan_pearson
    value: 40.92204625614112
  - type: manhattan_spearman
    value: 49.23333793661129
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (es)
    config: es
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 63.35268345563588
  - type: cos_sim_spearman
    value: 66.99661626042061
  - type: euclidean_pearson
    value: 65.85589122857066
  - type: euclidean_spearman
    value: 66.99661626042061
  - type: manhattan_pearson
    value: 66.78454301512294
  - type: manhattan_spearman
    value: 67.17570330149233
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (pl)
    config: pl
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 33.36599908204445
  - type: cos_sim_spearman
    value: 39.20768331939503
  - type: euclidean_pearson
    value: 22.16066769530468
  - type: euclidean_spearman
    value: 39.20768331939503
  - type: manhattan_pearson
    value: 22.386053195546022
  - type: manhattan_spearman
    value: 39.70172817465986
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (tr)
    config: tr
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 63.06813956986753
  - type: cos_sim_spearman
    value: 68.72065117995668
  - type: euclidean_pearson
    value: 66.97373456344194
  - type: euclidean_spearman
    value: 68.72065117995668
  - type: manhattan_pearson
    value: 67.34907265771595
  - type: manhattan_spearman
    value: 68.73705769957843
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (ar)
    config: ar
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 47.17664865207108
  - type: cos_sim_spearman
    value: 54.115568323148864
  - type: euclidean_pearson
    value: 48.56418162879182
  - type: euclidean_spearman
    value: 54.115568323148864
  - type: manhattan_pearson
    value: 48.85951643453165
  - type: manhattan_spearman
    value: 54.13599784169052
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (ru)
    config: ru
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 55.87514136275987
  - type: cos_sim_spearman
    value: 60.82923573674973
  - type: euclidean_pearson
    value: 53.724183308215615
  - type: euclidean_spearman
    value: 60.82923573674973
  - type: manhattan_pearson
    value: 53.954305573102445
  - type: manhattan_spearman
    value: 60.957483900644526
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (zh)
    config: zh
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 59.55001413648593
  - type: cos_sim_spearman
    value: 63.395777040381276
  - type: euclidean_pearson
    value: 59.869972550293305
  - type: euclidean_spearman
    value: 63.395777040381276
  - type: manhattan_pearson
    value: 61.16195496847885
  - type: manhattan_spearman
    value: 63.41968682525581
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (fr)
    config: fr
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 79.13334972675852
  - type: cos_sim_spearman
    value: 79.86263136371802
  - type: euclidean_pearson
    value: 78.2433603592541
  - type: euclidean_spearman
    value: 79.86263136371802
  - type: manhattan_pearson
    value: 78.87337106318412
  - type: manhattan_spearman
    value: 80.31230584758441
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (de-en)
    config: de-en
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 63.559700748242356
  - type: cos_sim_spearman
    value: 60.92342109509558
  - type: euclidean_pearson
    value: 66.07256437521119
  - type: euclidean_spearman
    value: 60.92342109509558
  - type: manhattan_pearson
    value: 67.72769744612663
  - type: manhattan_spearman
    value: 59.64714507774168
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (es-en)
    config: es-en
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 73.93491616145891
  - type: cos_sim_spearman
    value: 75.84242594400156
  - type: euclidean_pearson
    value: 74.87279745626121
  - type: euclidean_spearman
    value: 75.84242594400156
  - type: manhattan_pearson
    value: 76.47764144677505
  - type: manhattan_spearman
    value: 77.08411157845183
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (it)
    config: it
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 72.75624124540954
  - type: cos_sim_spearman
    value: 75.8667941654703
  - type: euclidean_pearson
    value: 73.74314588451925
  - type: euclidean_spearman
    value: 75.8667941654703
  - type: manhattan_pearson
    value: 73.99641425871518
  - type: manhattan_spearman
    value: 76.1982840205817
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (pl-en)
    config: pl-en
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 75.20898141298767
  - type: cos_sim_spearman
    value: 73.18060375331436
  - type: euclidean_pearson
    value: 75.44489280944619
  - type: euclidean_spearman
    value: 73.18060375331436
  - type: manhattan_pearson
    value: 75.65451039552286
  - type: manhattan_spearman
    value: 72.97744006123156
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (zh-en)
    config: zh-en
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 72.04278252247816
  - type: cos_sim_spearman
    value: 71.8846446821539
  - type: euclidean_pearson
    value: 73.16043307050612
  - type: euclidean_spearman
    value: 71.8846446821539
  - type: manhattan_pearson
    value: 74.76905116839777
  - type: manhattan_spearman
    value: 72.66237093518471
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (es-it)
    config: es-it
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 71.71033173838558
  - type: cos_sim_spearman
    value: 75.043122881885
  - type: euclidean_pearson
    value: 72.77579680345087
  - type: euclidean_spearman
    value: 75.043122881885
  - type: manhattan_pearson
    value: 72.99901534854922
  - type: manhattan_spearman
    value: 75.15418335015957
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (de-fr)
    config: de-fr
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 55.75733447190482
  - type: cos_sim_spearman
    value: 61.38968334176681
  - type: euclidean_pearson
    value: 55.479231520643744
  - type: euclidean_spearman
    value: 61.38968334176681
  - type: manhattan_pearson
    value: 56.05230571465244
  - type: manhattan_spearman
    value: 62.69383054007398
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (de-pl)
    config: de-pl
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 41.72244325050302
  - type: cos_sim_spearman
    value: 54.47476909084119
  - type: euclidean_pearson
    value: 43.94629756436873
  - type: euclidean_spearman
    value: 54.47476909084119
  - type: manhattan_pearson
    value: 46.36533046394657
  - type: manhattan_spearman
    value: 54.87509243633636
- task:
    type: STS
  dataset:
    type: mteb/sts22-crosslingual-sts
    name: MTEB STS22 (fr-pl)
    config: fr-pl
    split: test
    revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80
  metrics:
  - type: cos_sim_pearson
    value: 70.75183711835146
  - type: cos_sim_spearman
    value: 84.51542547285167
  - type: euclidean_pearson
    value: 71.84188960126669
  - type: euclidean_spearman
    value: 84.51542547285167
  - type: manhattan_pearson
    value: 73.94847166379994
  - type: manhattan_spearman
    value: 84.51542547285167
- task:
    type: STS
  dataset:
    type: C-MTEB/STSB
    name: MTEB STSB
    config: default
    split: test
    revision: None
  metrics:
  - type: cos_sim_pearson
    value: 81.78690149086131
  - type: cos_sim_spearman
    value: 81.81202616916873
  - type: euclidean_pearson
    value: 80.98792254251062
  - type: euclidean_spearman
    value: 81.81202616916873
  - type: manhattan_pearson
    value: 81.46953021346732
  - type: manhattan_spearman
    value: 82.34259562492315
- task:
    type: STS
  dataset:
    type: mteb/stsbenchmark-sts
    name: MTEB STSBenchmark
    config: default
    split: test
    revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831
  metrics:
  - type: cos_sim_pearson
    value: 87.68273341294419
  - type: cos_sim_spearman
    value: 88.59927164210958
  - type: euclidean_pearson
    value: 88.10745681818025
  - type: euclidean_spearman
    value: 88.59927164210958
  - type: manhattan_pearson
    value: 88.25166703784649
  - type: manhattan_spearman
    value: 88.85343247873482
- task:
    type: Reranking
  dataset:
    type: mteb/scidocs-reranking
    name: MTEB SciDocsRR
    config: default
    split: test
    revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab
  metrics:
  - type: map
    value: 86.3340463345719
  - type: mrr
    value: 96.5182611506141
- task:
    type: Retrieval
  dataset:
    type: scifact
    name: MTEB SciFact
    config: default
    split: test
    revision: None
  metrics:
  - type: map_at_1
    value: 60.967000000000006
  - type: map_at_10
    value: 71.873
  - type: map_at_100
    value: 72.271
  - type: map_at_1000
    value: 72.292
  - type: map_at_3
    value: 69.006
  - type: map_at_5
    value: 70.856
  - type: mrr_at_1
    value: 63.666999999999994
  - type: mrr_at_10
    value: 72.929
  - type: mrr_at_100
    value: 73.26
  - type: mrr_at_1000
    value: 73.282
  - type: mrr_at_3
    value: 71.111
  - type: mrr_at_5
    value: 72.328
  - type: ndcg_at_1
    value: 63.666999999999994
  - type: ndcg_at_10
    value: 76.414
  - type: ndcg_at_100
    value: 78.152
  - type: ndcg_at_1000
    value: 78.604
  - type: ndcg_at_3
    value: 71.841
  - type: ndcg_at_5
    value: 74.435
  - type: precision_at_1
    value: 63.666999999999994
  - type: precision_at_10
    value: 10.067
  - type: precision_at_100
    value: 1.097
  - type: precision_at_1000
    value: 0.11299999999999999
  - type: precision_at_3
    value: 27.667
  - type: precision_at_5
    value: 18.467
  - type: recall_at_1
    value: 60.967000000000006
  - type: recall_at_10
    value: 88.922
  - type: recall_at_100
    value: 96.667
  - type: recall_at_1000
    value: 100.0
  - type: recall_at_3
    value: 77.228
  - type: recall_at_5
    value: 83.428
- task:
    type: PairClassification
  dataset:
    type: mteb/sprintduplicatequestions-pairclassification
    name: MTEB SprintDuplicateQuestions
    config: default
    split: test
    revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46
  metrics:
  - type: cos_sim_accuracy
    value: 99.82277227722773
  - type: cos_sim_ap
    value: 95.66279851444406
  - type: cos_sim_f1
    value: 90.9367088607595
  - type: cos_sim_precision
    value: 92.1025641025641
  - type: cos_sim_recall
    value: 89.8
  - type: dot_accuracy
    value: 99.82277227722773
  - type: dot_ap
    value: 95.66279851444406
  - type: dot_f1
    value: 90.9367088607595
  - type: dot_precision
    value: 92.1025641025641
  - type: dot_recall
    value: 89.8
  - type: euclidean_accuracy
    value: 99.82277227722773
  - type: euclidean_ap
    value: 95.66279851444406
  - type: euclidean_f1
    value: 90.9367088607595
  - type: euclidean_precision
    value: 92.1025641025641
  - type: euclidean_recall
    value: 89.8
  - type: manhattan_accuracy
    value: 99.82673267326733
  - type: manhattan_ap
    value: 95.86094873177069
  - type: manhattan_f1
    value: 91.26788357178096
  - type: manhattan_precision
    value: 90.06815968841285
  - type: manhattan_recall
    value: 92.5
  - type: max_accuracy
    value: 99.82673267326733
  - type: max_ap
    value: 95.86094873177069
  - type: max_f1
    value: 91.26788357178096
- task:
    type: Clustering
  dataset:
    type: mteb/stackexchange-clustering
    name: MTEB StackExchangeClustering
    config: default
    split: test
    revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259
  metrics:
  - type: v_measure
    value: 73.09533925852372
- task:
    type: Clustering
  dataset:
    type: mteb/stackexchange-clustering-p2p
    name: MTEB StackExchangeClusteringP2P
    config: default
    split: test
    revision: 815ca46b2622cec33ccafc3735d572c266efdb44
  metrics:
  - type: v_measure
    value: 45.90745648090035
- task:
    type: Reranking
  dataset:
    type: mteb/stackoverflowdupquestions-reranking
    name: MTEB StackOverflowDupQuestions
    config: default
    split: test
    revision: e185fbe320c72810689fc5848eb6114e1ef5ec69
  metrics:
  - type: map
    value: 54.91147686504404
  - type: mrr
    value: 56.03900082760377
- task:
    type: Summarization
  dataset:
    type: mteb/summeval
    name: MTEB SummEval
    config: default
    split: test
    revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c
  metrics:
  - type: cos_sim_pearson
    value: 31.46908662038217
  - type: cos_sim_spearman
    value: 31.40325730367437
  - type: dot_pearson
    value: 31.469083969291894
  - type: dot_spearman
    value: 31.40325730367437
- task:
    type: Reranking
  dataset:
    type: C-MTEB/T2Reranking
    name: MTEB T2Reranking
    config: default
    split: dev
    revision: None
  metrics:
  - type: map
    value: 66.90300783402137
  - type: mrr
    value: 77.06451972574179
- task:
    type: Retrieval
  dataset:
    type: C-MTEB/T2Retrieval
    name: MTEB T2Retrieval
    config: default
    split: dev
    revision: None
  metrics:
  - type: map_at_1
    value: 25.82
  - type: map_at_10
    value: 72.32300000000001
  - type: map_at_100
    value: 76.198
  - type: map_at_1000
    value: 76.281
  - type: map_at_3
    value: 50.719
  - type: map_at_5
    value: 62.326
  - type: mrr_at_1
    value: 86.599
  - type: mrr_at_10
    value: 89.751
  - type: mrr_at_100
    value: 89.876
  - type: mrr_at_1000
    value: 89.88000000000001
  - type: mrr_at_3
    value: 89.151
  - type: mrr_at_5
    value: 89.519
  - type: ndcg_at_1
    value: 86.599
  - type: ndcg_at_10
    value: 80.676
  - type: ndcg_at_100
    value: 85.03
  - type: ndcg_at_1000
    value: 85.854
  - type: ndcg_at_3
    value: 82.057
  - type: ndcg_at_5
    value: 80.537
  - type: precision_at_1
    value: 86.599
  - type: precision_at_10
    value: 40.373
  - type: precision_at_100
    value: 4.95
  - type: precision_at_1000
    value: 0.514
  - type: precision_at_3
    value: 71.918
  - type: precision_at_5
    value: 60.246
  - type: recall_at_1
    value: 25.82
  - type: recall_at_10
    value: 79.905
  - type: recall_at_100
    value: 93.88499999999999
  - type: recall_at_1000
    value: 98.073
  - type: recall_at_3
    value: 52.623
  - type: recall_at_5
    value: 66.233
- task:
    type: Classification
  dataset:
    type: C-MTEB/TNews-classification
    name: MTEB TNews
    config: default
    split: validation
    revision: None
  metrics:
  - type: accuracy
    value: 47.050000000000004
  - type: f1
    value: 45.704071498353294
- task:
    type: Retrieval
  dataset:
    type: trec-covid
    name: MTEB TRECCOVID
    config: default
    split: test
    revision: None
  metrics:
  - type: map_at_1
    value: 0.243
  - type: map_at_10
    value: 2.278
  - type: map_at_100
    value: 14.221
  - type: map_at_1000
    value: 33.474
  - type: map_at_3
    value: 0.7270000000000001
  - type: map_at_5
    value: 1.183
  - type: mrr_at_1
    value: 94.0
  - type: mrr_at_10
    value: 97.0
  - type: mrr_at_100
    value: 97.0
  - type: mrr_at_1000
    value: 97.0
  - type: mrr_at_3
    value: 97.0
  - type: mrr_at_5
    value: 97.0
  - type: ndcg_at_1
    value: 90.0
  - type: ndcg_at_10
    value: 87.249
  - type: ndcg_at_100
    value: 67.876
  - type: ndcg_at_1000
    value: 59.205
  - type: ndcg_at_3
    value: 90.12299999999999
  - type: ndcg_at_5
    value: 89.126
  - type: precision_at_1
    value: 94.0
  - type: precision_at_10
    value: 90.8
  - type: precision_at_100
    value: 69.28
  - type: precision_at_1000
    value: 25.85
  - type: precision_at_3
    value: 94.667
  - type: precision_at_5
    value: 92.80000000000001
  - type: recall_at_1
    value: 0.243
  - type: recall_at_10
    value: 2.392
  - type: recall_at_100
    value: 16.982
  - type: recall_at_1000
    value: 55.214
  - type: recall_at_3
    value: 0.745
  - type: recall_at_5
    value: 1.2229999999999999
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (sqi-eng)
    config: sqi-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 70.5
  - type: f1
    value: 67.05501804646966
  - type: precision
    value: 65.73261904761904
  - type: recall
    value: 70.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (fry-eng)
    config: fry-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 75.14450867052022
  - type: f1
    value: 70.98265895953759
  - type: precision
    value: 69.26782273603082
  - type: recall
    value: 75.14450867052022
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (kur-eng)
    config: kur-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 33.170731707317074
  - type: f1
    value: 29.92876500193573
  - type: precision
    value: 28.669145894755648
  - type: recall
    value: 33.170731707317074
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (tur-eng)
    config: tur-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.5
  - type: f1
    value: 94.13333333333333
  - type: precision
    value: 93.46666666666667
  - type: recall
    value: 95.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (deu-eng)
    config: deu-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 99.6
  - type: f1
    value: 99.46666666666665
  - type: precision
    value: 99.4
  - type: recall
    value: 99.6
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (nld-eng)
    config: nld-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 97.2
  - type: f1
    value: 96.39999999999999
  - type: precision
    value: 96.0
  - type: recall
    value: 97.2
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ron-eng)
    config: ron-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 94.5
  - type: f1
    value: 92.99666666666667
  - type: precision
    value: 92.31666666666666
  - type: recall
    value: 94.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ang-eng)
    config: ang-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 85.82089552238806
  - type: f1
    value: 81.59203980099502
  - type: precision
    value: 79.60199004975124
  - type: recall
    value: 85.82089552238806
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ido-eng)
    config: ido-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 79.5
  - type: f1
    value: 75.11246031746032
  - type: precision
    value: 73.38734126984127
  - type: recall
    value: 79.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (jav-eng)
    config: jav-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 44.390243902439025
  - type: f1
    value: 38.48896631823461
  - type: precision
    value: 36.57220286488579
  - type: recall
    value: 44.390243902439025
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (isl-eng)
    config: isl-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 90.2
  - type: f1
    value: 87.57333333333334
  - type: precision
    value: 86.34166666666665
  - type: recall
    value: 90.2
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (slv-eng)
    config: slv-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 88.82138517618469
  - type: f1
    value: 85.98651854423423
  - type: precision
    value: 84.79257073424753
  - type: recall
    value: 88.82138517618469
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (cym-eng)
    config: cym-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 77.04347826086956
  - type: f1
    value: 72.32108147606868
  - type: precision
    value: 70.37207357859532
  - type: recall
    value: 77.04347826086956
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (kaz-eng)
    config: kaz-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 53.04347826086957
  - type: f1
    value: 46.88868184955141
  - type: precision
    value: 44.71730105643149
  - type: recall
    value: 53.04347826086957
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (est-eng)
    config: est-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 68.0
  - type: f1
    value: 62.891813186813195
  - type: precision
    value: 61.037906162464985
  - type: recall
    value: 68.0
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (heb-eng)
    config: heb-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 86.3
  - type: f1
    value: 82.82000000000001
  - type: precision
    value: 81.25690476190475
  - type: recall
    value: 86.3
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (gla-eng)
    config: gla-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 68.87816646562122
  - type: f1
    value: 63.53054933272062
  - type: precision
    value: 61.47807816331196
  - type: recall
    value: 68.87816646562122
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (mar-eng)
    config: mar-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 74.4
  - type: f1
    value: 68.99388888888889
  - type: precision
    value: 66.81035714285713
  - type: recall
    value: 74.4
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (lat-eng)
    config: lat-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 90.5
  - type: f1
    value: 87.93666666666667
  - type: precision
    value: 86.825
  - type: recall
    value: 90.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (bel-eng)
    config: bel-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 90.7
  - type: f1
    value: 88.09
  - type: precision
    value: 86.85833333333333
  - type: recall
    value: 90.7
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (pms-eng)
    config: pms-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 67.61904761904762
  - type: f1
    value: 62.30239247214037
  - type: precision
    value: 60.340702947845806
  - type: recall
    value: 67.61904761904762
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (gle-eng)
    config: gle-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 77.9
  - type: f1
    value: 73.81285714285714
  - type: precision
    value: 72.21570818070818
  - type: recall
    value: 77.9
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (pes-eng)
    config: pes-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 91.8
  - type: f1
    value: 89.66666666666667
  - type: precision
    value: 88.66666666666666
  - type: recall
    value: 91.8
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (nob-eng)
    config: nob-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 97.6
  - type: f1
    value: 96.85666666666665
  - type: precision
    value: 96.50833333333333
  - type: recall
    value: 97.6
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (bul-eng)
    config: bul-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.39999999999999
  - type: f1
    value: 93.98333333333333
  - type: precision
    value: 93.30000000000001
  - type: recall
    value: 95.39999999999999
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (cbk-eng)
    config: cbk-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 85.0
  - type: f1
    value: 81.31538461538462
  - type: precision
    value: 79.70666666666666
  - type: recall
    value: 85.0
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (hun-eng)
    config: hun-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 91.60000000000001
  - type: f1
    value: 89.81888888888888
  - type: precision
    value: 89.08583333333333
  - type: recall
    value: 91.60000000000001
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (uig-eng)
    config: uig-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 44.3
  - type: f1
    value: 38.8623088023088
  - type: precision
    value: 37.03755623461505
  - type: recall
    value: 44.3
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (rus-eng)
    config: rus-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.19999999999999
  - type: f1
    value: 93.75
  - type: precision
    value: 93.05
  - type: recall
    value: 95.19999999999999
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (spa-eng)
    config: spa-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 99.1
  - type: f1
    value: 98.8
  - type: precision
    value: 98.65
  - type: recall
    value: 99.1
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (hye-eng)
    config: hye-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 69.6765498652291
  - type: f1
    value: 63.991785393402644
  - type: precision
    value: 61.7343729944808
  - type: recall
    value: 69.6765498652291
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (tel-eng)
    config: tel-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 50.0
  - type: f1
    value: 42.79341029341029
  - type: precision
    value: 40.25098358431692
  - type: recall
    value: 50.0
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (afr-eng)
    config: afr-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 89.7
  - type: f1
    value: 87.19023809523809
  - type: precision
    value: 86.12595238095237
  - type: recall
    value: 89.7
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (mon-eng)
    config: mon-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 42.72727272727273
  - type: f1
    value: 37.78789518562245
  - type: precision
    value: 36.24208471267295
  - type: recall
    value: 42.72727272727273
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (arz-eng)
    config: arz-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 75.26205450733752
  - type: f1
    value: 70.72842833849123
  - type: precision
    value: 68.93256464011182
  - type: recall
    value: 75.26205450733752
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (hrv-eng)
    config: hrv-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.19999999999999
  - type: f1
    value: 93.96666666666668
  - type: precision
    value: 93.42
  - type: recall
    value: 95.19999999999999
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (nov-eng)
    config: nov-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 76.26459143968872
  - type: f1
    value: 72.40190419178747
  - type: precision
    value: 70.84954604409856
  - type: recall
    value: 76.26459143968872
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (gsw-eng)
    config: gsw-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 59.82905982905983
  - type: f1
    value: 52.2100122100122
  - type: precision
    value: 49.52516619183286
  - type: recall
    value: 59.82905982905983
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (nds-eng)
    config: nds-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 81.69999999999999
  - type: f1
    value: 77.41714285714286
  - type: precision
    value: 75.64833333333334
  - type: recall
    value: 81.69999999999999
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ukr-eng)
    config: ukr-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.5
  - type: f1
    value: 94.45
  - type: precision
    value: 93.93333333333334
  - type: recall
    value: 95.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (uzb-eng)
    config: uzb-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 58.41121495327103
  - type: f1
    value: 52.73495974430554
  - type: precision
    value: 50.717067200712066
  - type: recall
    value: 58.41121495327103
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (lit-eng)
    config: lit-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 73.3
  - type: f1
    value: 69.20371794871795
  - type: precision
    value: 67.6597557997558
  - type: recall
    value: 73.3
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ina-eng)
    config: ina-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 96.5
  - type: f1
    value: 95.51666666666667
  - type: precision
    value: 95.05
  - type: recall
    value: 96.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (lfn-eng)
    config: lfn-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 78.4
  - type: f1
    value: 73.88856643356644
  - type: precision
    value: 72.01373015873016
  - type: recall
    value: 78.4
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (zsm-eng)
    config: zsm-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.3
  - type: f1
    value: 94.09666666666668
  - type: precision
    value: 93.53333333333332
  - type: recall
    value: 95.3
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ita-eng)
    config: ita-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 93.7
  - type: f1
    value: 91.94
  - type: precision
    value: 91.10833333333333
  - type: recall
    value: 93.7
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (cmn-eng)
    config: cmn-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 96.8
  - type: f1
    value: 95.89999999999999
  - type: precision
    value: 95.46666666666668
  - type: recall
    value: 96.8
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (lvs-eng)
    config: lvs-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 70.5
  - type: f1
    value: 66.00635642135641
  - type: precision
    value: 64.36345238095238
  - type: recall
    value: 70.5
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (glg-eng)
    config: glg-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 92.4
  - type: f1
    value: 90.44388888888889
  - type: precision
    value: 89.5767857142857
  - type: recall
    value: 92.4
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ceb-eng)
    config: ceb-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 48.0
  - type: f1
    value: 43.15372775372776
  - type: precision
    value: 41.53152510162313
  - type: recall
    value: 48.0
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (bre-eng)
    config: bre-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 16.7
  - type: f1
    value: 14.198431372549017
  - type: precision
    value: 13.411765873015872
  - type: recall
    value: 16.7
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (ben-eng)
    config: ben-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 85.7
  - type: f1
    value: 81.81666666666666
  - type: precision
    value: 80.10833333333332
  - type: recall
    value: 85.7
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (swg-eng)
    config: swg-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 69.64285714285714
  - type: f1
    value: 64.745670995671
  - type: precision
    value: 62.916666666666664
  - type: recall
    value: 69.64285714285714
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (arq-eng)
    config: arq-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 54.665203073545555
  - type: f1
    value: 48.55366630916923
  - type: precision
    value: 46.35683318998357
  - type: recall
    value: 54.665203073545555
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (kab-eng)
    config: kab-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 4.8
  - type: f1
    value: 3.808587223587223
  - type: precision
    value: 3.5653174603174604
  - type: recall
    value: 4.8
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (fra-eng)
    config: fra-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 96.6
  - type: f1
    value: 95.77333333333333
  - type: precision
    value: 95.39166666666667
  - type: recall
    value: 96.6
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (por-eng)
    config: por-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 95.39999999999999
  - type: f1
    value: 94.44
  - type: precision
    value: 93.975
  - type: recall
    value: 95.39999999999999
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (tat-eng)
    config: tat-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 42.0
  - type: f1
    value: 37.024908424908425
  - type: precision
    value: 35.365992063492065
  - type: recall
    value: 42.0
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (oci-eng)
    config: oci-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 66.7
  - type: f1
    value: 62.20460835058661
  - type: precision
    value: 60.590134587634594
  - type: recall
    value: 66.7
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (pol-eng)
    config: pol-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 97.3
  - type: f1
    value: 96.46666666666667
  - type: precision
    value: 96.06666666666668
  - type: recall
    value: 97.3
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (war-eng)
    config: war-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 47.3
  - type: f1
    value: 41.96905408317173
  - type: precision
    value: 40.18741402116402
  - type: recall
    value: 47.3
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (aze-eng)
    config: aze-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 80.2
  - type: f1
    value: 76.22690476190476
  - type: precision
    value: 74.63539682539682
  - type: recall
    value: 80.2
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba (vie-eng)
    config: vie-eng
    split: test
    revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553
  metrics:
  - type: accuracy
    value: 96.0
  - type: f1
    value: 94.83333333333333
  - type: precision
    value: 94.26666666666668
  - type: recall
    value: 96.0
- task:
    type: BitextMining
  dataset:
    type: mteb/tatoeba-bitext-mining
    name: MTEB Tatoeba
(nno-eng) config: nno-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.7 - type: f1 value: 87.24333333333334 - type: precision value: 86.17 - type: recall value: 89.7 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cha-eng) config: cha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 50.36496350364964 - type: f1 value: 44.795520780922246 - type: precision value: 43.09002433090024 - type: recall value: 50.36496350364964 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mhr-eng) config: mhr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 18.8 - type: f1 value: 16.242864357864356 - type: precision value: 15.466596638655464 - type: recall value: 18.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (dan-eng) config: dan-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.19999999999999 - type: f1 value: 93.92333333333333 - type: precision value: 93.30833333333332 - type: recall value: 95.19999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ell-eng) config: ell-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.4 - type: f1 value: 91.42333333333333 - type: precision value: 90.50833333333334 - type: recall value: 93.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (amh-eng) config: amh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 26.190476190476193 - type: f1 value: 22.05208151636723 - type: precision value: 21.09292328042328 - type: recall value: 26.190476190476193 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (pam-eng) config: pam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 17.2 - type: f1 value: 14.021009731460952 - type: precision value: 13.1389886698243 - type: recall value: 17.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hsb-eng) config: hsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.67494824016563 - type: f1 value: 74.24430641821947 - type: precision value: 72.50747642051991 - type: recall value: 78.67494824016563 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (srp-eng) config: srp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.19999999999999 - type: f1 value: 92.54 - type: precision value: 91.75833333333334 - type: recall value: 94.19999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (epo-eng) config: epo-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.2 - type: f1 value: 87.78666666666666 - type: precision value: 86.69833333333334 - type: recall value: 90.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kzj-eng) config: kzj-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 14.7 - type: f1 value: 12.19206214842218 - type: precision value: 11.526261904761904 - type: recall value: 14.7 
- task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (awa-eng) config: awa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 73.16017316017316 - type: f1 value: 67.44858316286889 - type: precision value: 65.23809523809523 - type: recall value: 73.16017316017316 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (fao-eng) config: fao-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 75.19083969465649 - type: f1 value: 70.33078880407125 - type: precision value: 68.3969465648855 - type: recall value: 75.19083969465649 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mal-eng) config: mal-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 62.154294032023294 - type: f1 value: 55.86030821838681 - type: precision value: 53.53509623160277 - type: recall value: 62.154294032023294 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ile-eng) config: ile-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.8 - type: f1 value: 83.9652380952381 - type: precision value: 82.84242424242424 - type: recall value: 86.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (bos-eng) config: bos-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.50282485875707 - type: f1 value: 91.54425612052731 - type: precision value: 90.65442561205272 - type: recall value: 93.50282485875707 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cor-eng) config: cor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 11.4 - type: f1 value: 9.189775870222714 - type: precision value: 8.66189886502811 - type: recall value: 11.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (cat-eng) config: cat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.4 - type: f1 value: 91.88666666666666 - type: precision value: 91.21444444444444 - type: recall value: 93.4 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (eus-eng) config: eus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 46.0 - type: f1 value: 40.51069226095542 - type: precision value: 38.57804926010808 - type: recall value: 46.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (yue-eng) config: yue-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.0 - type: f1 value: 89.11333333333333 - type: precision value: 88.27000000000001 - type: recall value: 91.0 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (swe-eng) config: swe-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.39999999999999 - type: f1 value: 92.95 - type: precision value: 92.27000000000001 - type: recall value: 94.39999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (dtp-eng) config: dtp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 14.2 - type: f1 
value: 11.73701698770113 - type: precision value: 11.079207014736676 - type: recall value: 14.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kat-eng) config: kat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.14745308310992 - type: f1 value: 59.665707393589415 - type: precision value: 57.560853653346946 - type: recall value: 65.14745308310992 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (jpn-eng) config: jpn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.39999999999999 - type: f1 value: 94.0 - type: precision value: 93.33333333333333 - type: recall value: 95.39999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (csb-eng) config: csb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 69.56521739130434 - type: f1 value: 62.92490118577074 - type: precision value: 60.27009222661397 - type: recall value: 69.56521739130434 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (xho-eng) config: xho-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 40.140845070422536 - type: f1 value: 35.96411804158283 - type: precision value: 34.89075869357559 - type: recall value: 40.140845070422536 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (orv-eng) config: orv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.86826347305389 - type: f1 value: 59.646248628284546 - type: precision value: 57.22982606216139 - type: recall value: 65.86826347305389 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ind-eng) config: ind-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.89999999999999 - type: f1 value: 93.48333333333333 - type: precision value: 92.83666666666667 - type: recall value: 94.89999999999999 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tuk-eng) config: tuk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 47.783251231527096 - type: f1 value: 42.006447302013804 - type: precision value: 40.12747105111637 - type: recall value: 47.783251231527096 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (max-eng) config: max-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 69.71830985915493 - type: f1 value: 64.80266212660578 - type: precision value: 63.08098591549296 - type: recall value: 69.71830985915493 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (swh-eng) config: swh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 67.94871794871796 - type: f1 value: 61.59912309912309 - type: precision value: 59.17338217338218 - type: recall value: 67.94871794871796 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (hin-eng) config: hin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.39999999999999 - type: f1 value: 95.28333333333335 - type: precision value: 94.75 - type: recall value: 96.39999999999999 - 
task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (dsb-eng) config: dsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 70.14613778705638 - type: f1 value: 65.4349338900487 - type: precision value: 63.57599255302805 - type: recall value: 70.14613778705638 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ber-eng) config: ber-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 9.2 - type: f1 value: 7.622184434339607 - type: precision value: 7.287048159682417 - type: recall value: 9.2 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tam-eng) config: tam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.85016286644951 - type: f1 value: 72.83387622149837 - type: precision value: 70.58450959102424 - type: recall value: 77.85016286644951 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (slk-eng) config: slk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.8 - type: f1 value: 88.84333333333333 - type: precision value: 87.96666666666665 - type: recall value: 90.8 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tgl-eng) config: tgl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.6 - type: f1 value: 93.14 - type: precision value: 92.49833333333333 - type: recall value: 94.6 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ast-eng) config: ast-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.25196850393701 - type: f1 value: 80.94488188976378 - type: precision value: 79.65879265091863 - type: recall value: 84.25196850393701 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (mkd-eng) config: mkd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.5 - type: f1 value: 86.89666666666666 - type: precision value: 85.7 - type: recall value: 89.5 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (khm-eng) config: khm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 42.797783933518005 - type: f1 value: 37.30617360155193 - type: precision value: 35.34933825792552 - type: recall value: 42.797783933518005 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ces-eng) config: ces-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.1 - type: f1 value: 94.93333333333332 - type: precision value: 94.38333333333333 - type: recall value: 96.1 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tzl-eng) config: tzl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 54.807692307692314 - type: f1 value: 49.506903353057204 - type: precision value: 47.54807692307693 - type: recall value: 54.807692307692314 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (urd-eng) config: urd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.1 - type: f1 value: 
83.61857142857143 - type: precision value: 81.975 - type: recall value: 87.1 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (ara-eng) config: ara-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.10000000000001 - type: f1 value: 88.76333333333332 - type: precision value: 87.67 - type: recall value: 91.10000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (kor-eng) config: kor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.10000000000001 - type: f1 value: 91.28999999999999 - type: precision value: 90.44500000000001 - type: recall value: 93.10000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (yid-eng) config: yid-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 39.97641509433962 - type: f1 value: 33.12271889998028 - type: precision value: 30.95185381542554 - type: recall value: 39.97641509433962 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (fin-eng) config: fin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.60000000000001 - type: f1 value: 90.69 - type: precision value: 89.84500000000001 - type: recall value: 92.60000000000001 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (tha-eng) config: tha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.07299270072993 - type: f1 value: 93.64355231143554 - type: precision value: 92.94403892944038 - type: recall value: 95.07299270072993 - task: type: BitextMining dataset: type: mteb/tatoeba-bitext-mining name: MTEB Tatoeba (wuu-eng) config: wuu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.9 - type: f1 value: 89.61333333333333 - type: precision value: 88.53333333333333 - type: recall value: 91.9 - task: type: Clustering dataset: type: C-MTEB/ThuNewsClusteringP2P name: MTEB ThuNewsClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 64.68478289806511 - task: type: Clustering dataset: type: C-MTEB/ThuNewsClusteringS2S name: MTEB ThuNewsClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 57.53010296184097 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.519 - type: map_at_10 value: 10.31 - type: map_at_100 value: 16.027 - type: map_at_1000 value: 17.827 - type: map_at_3 value: 5.721 - type: map_at_5 value: 7.7829999999999995 - type: mrr_at_1 value: 34.694 - type: mrr_at_10 value: 52.642999999999994 - type: mrr_at_100 value: 53.366 - type: mrr_at_1000 value: 53.366 - type: mrr_at_3 value: 48.638999999999996 - type: mrr_at_5 value: 50.578 - type: ndcg_at_1 value: 31.633 - type: ndcg_at_10 value: 26.394000000000002 - type: ndcg_at_100 value: 36.41 - type: ndcg_at_1000 value: 49.206 - type: ndcg_at_3 value: 31.694 - type: ndcg_at_5 value: 29.529 - type: precision_at_1 value: 34.694 - type: precision_at_10 value: 23.469 - type: precision_at_100 value: 7.286 - type: precision_at_1000 value: 1.5610000000000002 - type: precision_at_3 value: 34.014 - type: precision_at_5 value: 29.796 - type: recall_at_1 value: 2.519 - type: recall_at_10 value: 
17.091 - type: recall_at_100 value: 45.429 - type: recall_at_1000 value: 84.621 - type: recall_at_3 value: 7.208 - type: recall_at_5 value: 10.523 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 69.58659999999999 - type: ap value: 14.735696532619 - type: f1 value: 54.23517220069903 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 63.723825693265425 - type: f1 value: 64.02405729449103 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 54.310161547491006 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 88.77630088812064 - type: cos_sim_ap value: 81.61725457333809 - type: cos_sim_f1 value: 74.91373801916932 - type: cos_sim_precision value: 72.63940520446097 - type: cos_sim_recall value: 77.33509234828496 - type: dot_accuracy value: 88.77630088812064 - type: dot_ap value: 81.61725317476251 - type: dot_f1 value: 74.91373801916932 - type: dot_precision value: 72.63940520446097 - type: dot_recall value: 77.33509234828496 - type: euclidean_accuracy value: 88.77630088812064 - type: euclidean_ap value: 81.61724596869566 - type: euclidean_f1 value: 74.91373801916932 - type: euclidean_precision value: 72.63940520446097 - type: euclidean_recall value: 77.33509234828496 - type: manhattan_accuracy value: 88.67497168742922 - type: manhattan_ap value: 81.430251048948 - type: manhattan_f1 value: 74.79593118171543 - type: manhattan_precision value: 71.3635274382938 - type: manhattan_recall value: 78.57519788918206 - type: max_accuracy value: 88.77630088812064 - type: max_ap value: 81.61725457333809 - type: max_f1 value: 74.91373801916932 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.85136026700819 - type: cos_sim_ap value: 87.74656687446567 - type: cos_sim_f1 value: 80.3221673073403 - type: cos_sim_precision value: 76.56871640957633 - type: cos_sim_recall value: 84.46258084385587 - type: dot_accuracy value: 89.85136026700819 - type: dot_ap value: 87.74656471395072 - type: dot_f1 value: 80.3221673073403 - type: dot_precision value: 76.56871640957633 - type: dot_recall value: 84.46258084385587 - type: euclidean_accuracy value: 89.85136026700819 - type: euclidean_ap value: 87.74656885754466 - type: euclidean_f1 value: 80.3221673073403 - type: euclidean_precision value: 76.56871640957633 - type: euclidean_recall value: 84.46258084385587 - type: manhattan_accuracy value: 89.86300306593705 - type: manhattan_ap value: 87.78807479093082 - type: manhattan_f1 value: 80.31663429471911 - type: manhattan_precision value: 76.63472970137772 - type: manhattan_recall value: 84.3701878657222 - type: max_accuracy value: 89.86300306593705 - type: max_ap value: 87.78807479093082 - type: max_f1 value: 80.3221673073403 
- task: type: Retrieval dataset: type: C-MTEB/VideoRetrieval name: MTEB VideoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 32.4 - type: map_at_10 value: 40.961999999999996 - type: map_at_100 value: 41.660000000000004 - type: map_at_1000 value: 41.721000000000004 - type: map_at_3 value: 38.550000000000004 - type: map_at_5 value: 40.06 - type: mrr_at_1 value: 32.4 - type: mrr_at_10 value: 40.961999999999996 - type: mrr_at_100 value: 41.660000000000004 - type: mrr_at_1000 value: 41.721000000000004 - type: mrr_at_3 value: 38.550000000000004 - type: mrr_at_5 value: 40.06 - type: ndcg_at_1 value: 32.4 - type: ndcg_at_10 value: 45.388 - type: ndcg_at_100 value: 49.012 - type: ndcg_at_1000 value: 50.659 - type: ndcg_at_3 value: 40.47 - type: ndcg_at_5 value: 43.232 - type: precision_at_1 value: 32.4 - type: precision_at_10 value: 5.94 - type: precision_at_100 value: 0.769 - type: precision_at_1000 value: 0.09 - type: precision_at_3 value: 15.333 - type: precision_at_5 value: 10.56 - type: recall_at_1 value: 32.4 - type: recall_at_10 value: 59.4 - type: recall_at_100 value: 76.9 - type: recall_at_1000 value: 90.0 - type: recall_at_3 value: 46.0 - type: recall_at_5 value: 52.800000000000004 - task: type: Classification dataset: type: C-MTEB/waimai-classification name: MTEB Waimai config: default split: test revision: None metrics: - type: accuracy value: 86.94000000000001 - type: ap value: 70.57373468481975 - type: f1 value: 85.26264784928323 language: - en license: mit --- ## E5-mistral-7b-instruct [Improving Text Embeddings with Large Language Models](https://arxiv.org/pdf/2401.00368.pdf). Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, Furu Wei, arXiv 2024 This model has 32 layers and the embedding size is 4096. ## Usage Below is an example to encode queries and passages from the MS-MARCO passage ranking dataset. ```python import torch import torch.nn.functional as F from torch import Tensor from transformers import AutoTokenizer, AutoModel def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor: left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0]) if left_padding: return last_hidden_states[:, -1] else: sequence_lengths = attention_mask.sum(dim=1) - 1 batch_size = last_hidden_states.shape[0] return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths] def get_detailed_instruct(task_description: str, query: str) -> str: return f'Instruct: {task_description}\nQuery: {query}' # Each query must come with a one-sentence instruction that describes the task task = 'Given a web search query, retrieve relevant passages that answer the query' queries = [ get_detailed_instruct(task, 'how much protein should a female eat'), get_detailed_instruct(task, 'summit define') ] # No need to add instruction for retrieval documents documents = [ "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.", "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments." 
] input_texts = queries + documents tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-mistral-7b-instruct') model = AutoModel.from_pretrained('intfloat/e5-mistral-7b-instruct') max_length = 4096 # Tokenize the input texts batch_dict = tokenizer(input_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True) # append the eos_token_id to every input_ids sequence batch_dict['input_ids'] = [input_ids + [tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']] batch_dict = tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt') outputs = model(**batch_dict) embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask']) # normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) scores = (embeddings[:2] @ embeddings[2:].T) * 100 print(scores.tolist()) ``` ## Supported Languages This model is initialized from [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) and fine-tuned on a mixture of multilingual datasets. As a result, it has some multilingual capability. However, since Mistral-7B-v0.1 is mainly trained on English data, we recommend using this model for English only. For multilingual use cases, please refer to [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large). ## MTEB Benchmark Evaluation Check out [unilm/e5](https://github.com/microsoft/unilm/tree/master/e5) to reproduce evaluation results on the [BEIR](https://arxiv.org/abs/2104.08663) and [MTEB](https://arxiv.org/abs/2210.07316) benchmarks. ## FAQ **1. Do I need to add instructions to the query?** Yes, this is how the model was trained; otherwise, you will see a performance degradation. The task definition should be a one-sentence instruction that describes the task. This is a way to customize text embeddings for different scenarios through natural language instructions. Please check out [unilm/e5/utils.py](https://github.com/microsoft/unilm/blob/9c0f1ff7ca53431fe47d2637dfe253643d94185b/e5/utils.py#L106) for the instructions we used for evaluation. On the other hand, there is no need to add instructions to the document side. **2. Why are my reproduced results slightly different from those reported in the model card?** Different versions of `transformers` and `pytorch` could cause negligible but non-zero performance differences. **3. Where are the LoRA-only weights?** You can find the LoRA-only weights at [https://huggingface.co/intfloat/e5-mistral-7b-instruct/tree/main/lora](https://huggingface.co/intfloat/e5-mistral-7b-instruct/tree/main/lora). ## Citation If you find our paper or models helpful, please consider citing them as follows: ```bibtex @article{wang2023improving, title={Improving Text Embeddings with Large Language Models}, author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu}, journal={arXiv preprint arXiv:2401.00368}, year={2023} } @article{wang2022text, title={Text Embeddings by Weakly-Supervised Contrastive Pre-training}, author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Jiao, Binxing and Yang, Linjun and Jiang, Daxin and Majumder, Rangan and Wei, Furu}, journal={arXiv preprint arXiv:2212.03533}, year={2022} } ``` ## Limitations Using this model for inputs longer than 4096 tokens is not recommended. This model's multilingual capability is still inferior to [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) in some cases.
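As a concrete illustration of FAQ 1 above, here is a minimal sketch that reuses the `get_detailed_instruct` helper from the usage example to build a query for a different retrieval task; the task wording below is an illustrative placeholder, not an instruction taken from the paper:

```python
# A minimal sketch reusing get_detailed_instruct from the usage example above.
# The task description is a hypothetical placeholder; write a one-sentence
# description of your own task instead.
task = 'Given a question, retrieve passages that answer the question'
queries = [get_detailed_instruct(task, 'what is the boiling point of water at sea level')]
# Documents are still encoded without any instruction, exactly as in the usage example.
```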
flair/pos-english
flair
"2023-04-10T15:54:36Z"
114,796
25
flair
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:05Z"
--- tags: - flair - token-classification - sequence-tagger-model language: en datasets: - ontonotes inference: false --- ## English Part-of-Speech Tagging in Flair (default model) This is the standard part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **98.19** (Ontonotes) Predicts fine-grained POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADD | Email | |AFX | Affix | |CC | Coordinating conjunction | |CD | Cardinal number | |DT | Determiner | |EX | Existential there | |FW | Foreign word | |HYPH | Hyphen | |IN | Preposition or subordinating conjunction | |JJ | Adjective | |JJR |Adjective, comparative | |JJS | Adjective, superlative | |LS | List item marker | |MD | Modal | |NFP | Superfluous punctuation | |NN | Noun, singular or mass | |NNP |Proper noun, singular | |NNPS | Proper noun, plural | |NNS |Noun, plural | |PDT | Predeterminer | |POS | Possessive ending | |PRP | Personal pronoun | |PRP$ | Possessive pronoun | |RB | Adverb | |RBR | Adverb, comparative | |RBS | Adverb, superlative | |RP | Particle | |SYM | Symbol | |TO | to | |UH | Interjection | |VB | Verb, base form | |VBD | Verb, past tense | |VBG | Verb, gerund or present participle | |VBN | Verb, past participle | |VBP | Verb, non-3rd person singular present | |VBZ | Verb, 3rd person singular present | |WDT | Wh-determiner | |WP | Wh-pronoun | |WP$ | Possessive wh-pronoun | |WRB | Wh-adverb | |XX | Unknown | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/pos-english") # make example sentence sentence = Sentence("I love Berlin.") # predict POS tags tagger.predict(sentence) # print sentence print(sentence) # print predicted POS spans print('The following POS tags are found:') # iterate over spans and print for entity in sentence.get_spans('pos'): print(entity) ``` This yields the following output: ``` Span [1]: "I" [− Labels: PRP (1.0)] Span [2]: "love" [− Labels: VBP (1.0)] Span [3]: "Berlin" [− Labels: NNP (0.9999)] Span [4]: "." [− Labels: . (1.0)] ``` So, the word "*I*" is labeled as a **pronoun** (PRP), "*love*" is labeled as a **verb** (VBP) and "*Berlin*" is labeled as a **proper noun** (NNP) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat it into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'pos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of forward and backward Flair embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5.
initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/pos-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
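If you need the predictions programmatically rather than printed, the spans from the demo above can be unpacked into plain tuples. A minimal sketch, assuming a recent Flair release where spans expose `text`, `tag`, and `score`:

```python
# A minimal sketch, assuming a recent Flair release: collect (token, tag, confidence)
# tuples from the tagged sentence instead of printing the spans directly.
pairs = [(span.text, span.tag, span.score) for span in sentence.get_spans('pos')]
print(pairs)  # e.g. [('I', 'PRP', 1.0), ('love', 'VBP', 1.0), ...]
```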
EleutherAI/pythia-31m
EleutherAI
"2023-07-26T17:37:54Z"
114,661
2
transformers
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-07-19T13:59:19Z"
Entry not found
VoVanPhuc/sup-SimCSE-VietNamese-phobert-base
VoVanPhuc
"2024-04-10T09:01:08Z"
114,425
15
transformers
[ "transformers", "pytorch", "safetensors", "roberta", "sentence-similarity", "vi", "arxiv:2104.08821", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
--- language: - vi pipeline_tag: sentence-similarity --- #### Table of contents 1. [Introduction](#introduction) 2. [Pre-trained models](#models) 3. [Using SimeCSE_Vietnamese with `sentence-transformers`](#sentences-transformers) - [Installation](#install1) - [Example usage](#usage1) 4. [Using SimeCSE_Vietnamese with `transformers`](#transformers) - [Installation](#install2) - [Example usage](#usage2) # <a name="introduction"></a> SimeCSE_Vietnamese: Simple Contrastive Learning of Sentence Embeddings with Vietnamese Pre-trained SimeCSE_Vietnamese models are the state of the art in sentence embeddings for Vietnamese: - The SimeCSE_Vietnamese pre-training approach is based on [SimCSE](https://arxiv.org/abs/2104.08821), which optimizes the pre-training procedure for more robust performance. - SimeCSE_Vietnamese encodes input sentences using a pre-trained language model such as [PhoBERT](https://www.aclweb.org/anthology/2020.findings-emnlp.92/). - SimeCSE_Vietnamese works with both unlabeled and labeled data. ## Pre-trained models <a name="models"></a> Model | #params | Arch. ---|---|--- [`VoVanPhuc/sup-SimCSE-VietNamese-phobert-base`](https://huggingface.co/VoVanPhuc/sup-SimCSE-VietNamese-phobert-base) | 135M | base [`VoVanPhuc/unsup-SimCSE-VietNamese-phobert-base`](https://huggingface.co/VoVanPhuc/unsup-SimCSE-VietNamese-phobert-base) | 135M | base ## <a name="sentences-transformers"></a> Using SimeCSE_Vietnamese with `sentence-transformers` ### Installation <a name="install1"></a> - Install `sentence-transformers`: - `pip install -U sentence-transformers` - Install `pyvi` for word segmentation: - `pip install pyvi` ### Example usage <a name="usage1"></a> ```python from sentence_transformers import SentenceTransformer from pyvi.ViTokenizer import tokenize model = SentenceTransformer('VoVanPhuc/sup-SimCSE-VietNamese-phobert-base') sentences = ['Kẻ đánh bom đinh tồi tệ nhất nước Anh.', 'Nghệ sĩ làm thiện nguyện - minh bạch là việc cấp thiết.', 'Bắc Giang tăng khả năng điều trị và xét nghiệm.', 'HLV futsal Việt Nam tiết lộ lý do hạ Lebanon.', 'việc quan trọng khi kêu gọi quyên góp từ thiện là phải minh bạch, giải ngân kịp thời.', '20% bệnh nhân Covid-19 có thể nhanh chóng trở nặng.', 'Thái Lan thua giao hữu trước vòng loại World Cup.', 'Cựu tuyển thủ Nguyễn Bảo Quân: May mắn ủng hộ futsal Việt Nam', 'Chủ ki-ốt bị đâm chết trong chợ đầu mối lớn nhất Thanh Hoá.', 'Bắn chết người trong cuộc rượt đuổi trên sông.'
] sentences = [tokenize(sentence) for sentence in sentences] embeddings = model.encode(sentences) ``` ## <a name="transformers"></a> Using SimeCSE_Vietnamese with `transformers` ### Installation <a name="install2"></a> - Install `transformers`: - `pip install -U transformers` - Install `pyvi` for word segmentation: - `pip install pyvi` ### Example usage <a name="usage2"></a> ```python import torch from transformers import AutoModel, AutoTokenizer from pyvi.ViTokenizer import tokenize PhobertTokenizer = AutoTokenizer.from_pretrained("VoVanPhuc/sup-SimCSE-VietNamese-phobert-base") model = AutoModel.from_pretrained("VoVanPhuc/sup-SimCSE-VietNamese-phobert-base") sentences = ['Kẻ đánh bom đinh tồi tệ nhất nước Anh.', 'Nghệ sĩ làm thiện nguyện - minh bạch là việc cấp thiết.', 'Bắc Giang tăng khả năng điều trị và xét nghiệm.', 'HLV futsal Việt Nam tiết lộ lý do hạ Lebanon.', 'việc quan trọng khi kêu gọi quyên góp từ thiện là phải minh bạch, giải ngân kịp thời.', '20% bệnh nhân Covid-19 có thể nhanh chóng trở nặng.', 'Thái Lan thua giao hữu trước vòng loại World Cup.', 'Cựu tuyển thủ Nguyễn Bảo Quân: May mắn ủng hộ futsal Việt Nam', 'Chủ ki-ốt bị đâm chết trong chợ đầu mối lớn nhất Thanh Hoá.', 'Bắn chết người trong cuộc rượt đuổi trên sông.' ] sentences = [tokenize(sentence) for sentence in sentences] inputs = PhobertTokenizer(sentences, padding=True, truncation=True, return_tensors="pt") with torch.no_grad(): embeddings = model(**inputs, output_hidden_states=True, return_dict=True).pooler_output ``` ## Quick Start [Open In Colab](https://colab.research.google.com/drive/12__EXJoQYHe9nhi4aXLTf9idtXT8yr7H?usp=sharing) ## Citation @article{gao2021simcse, title={{SimCSE}: Simple Contrastive Learning of Sentence Embeddings}, author={Gao, Tianyu and Yao, Xingcheng and Chen, Danqi}, journal={arXiv preprint arXiv:2104.08821}, year={2021} } @inproceedings{phobert, title = {{PhoBERT: Pre-trained language models for Vietnamese}}, author = {Dat Quoc Nguyen and Anh Tuan Nguyen}, booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2020}, year = {2020}, pages = {1037--1042} }
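Since this model is meant for sentence similarity, a natural follow-up to the `transformers` example above is to score sentence pairs. This cosine-similarity snippet is a minimal sketch, not part of the original examples:

```python
# A minimal sketch (not from the original card): cosine similarity between the
# first sentence and every other sentence, using the `embeddings` tensor
# computed in the transformers example above.
import torch.nn.functional as F

emb = F.normalize(embeddings, p=2, dim=1)  # L2-normalize each embedding
scores = emb[0] @ emb[1:].T                # cosine similarities to sentence 0
print(scores)
```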
monster-labs/control_v1p_sd15_qrcode_monster
monster-labs
"2023-07-21T11:35:31Z"
114,389
1,212
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "controlnet", "qrcode", "en", "license:openrail++", "has_space", "region:us" ]
null
"2023-06-24T15:07:20Z"
--- tags: - stable-diffusion - controlnet - qrcode license: openrail++ language: - en --- # Controlnet QR Code Monster v2 For SD-1.5 ![QR code in shape of a blue monster, reading "https://qrcode.monster"](images/monster.png) ## Model Description This model is made to generate creative QR codes that still scan. Keep in mind that not all generated codes might be readable, but you can try different parameters and prompts to get the desired results. **NEW VERSION** Introducing the upgraded version of our model - Controlnet QR code Monster v2. V2 is a huge upgrade over v1, for scannability AND creativity. QR codes can now seamlessly blend into the image by using a gray-colored background (#808080). As with the former version, the readability of some generated codes may vary; however, playing around with parameters and prompts could yield better results. You can find it in the `v2/` subfolder. ## How to Use - **Condition**: QR codes are passed as condition images with a module size of 16px. Use a higher error correction level to make it easier to read (sometimes a lower level can be easier to read if smaller in size). Use a gray background for the rest of the image to make the code integrate better. - **Prompts**: Use a prompt to guide the QR code generation. The output will highly depend on the given prompt. Some prompts are accepted very easily by the QR code process; some will require careful tweaking to get good results. - **Controlnet guidance scale**: Set the controlnet guidance scale value: - High values: The generated QR code will be more readable. - Low values: The generated QR code will be more creative. ### Tips - For an optimally readable output, try generating multiple QR codes with similar parameters, then choose the best ones. - Use the Image-to-Image feature to improve the readability of a generated QR code: - Decrease the denoising strength to retain more of the original image. - Increase the controlnet guidance scale value for better readability. A typical workflow for "saving" a code would be: max out the guidance scale and minimize the denoising strength, then bump the strength until the code scans. ## Example Outputs Here are some examples of creative, yet scannable QR codes produced by our model: ![City ruins with a building facade in shape of a QR code, reading "https://qrcode.monster"](images/architecture.png) ![QR code in shape of a tree, reading "https://qrcode.monster"](images/tree.png) ![A gothic sculpture in shape of a QR code, reading "https://qrcode.monster"](images/skulls.png) Feel free to experiment with prompts, parameters, and the Image-to-Image feature to achieve the desired QR code output. Good luck and have fun!
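The card describes the conditioning and parameters but ships no code. Below is a minimal `diffusers` sketch; the base SD-1.5 checkpoint, the QR image path, and every parameter value are illustrative assumptions, not recommendations from the authors:

```python
# A minimal sketch, not an official example. All file names and parameter
# values below are assumptions for illustration only.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "monster-labs/control_v1p_sd15_qrcode_monster",  # pass subfolder="v2" for the v2 weights
    torch_dtype=torch.float16,
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # any SD-1.5 checkpoint should work
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

# Condition image: a QR code with 16px modules on a gray (#808080) background.
qr = load_image("qr_code.png")  # hypothetical path

image = pipe(
    "a blue monster made of moss and vines",  # illustrative prompt
    image=qr,
    controlnet_conditioning_scale=1.2,  # higher = more readable, lower = more creative
    num_inference_steps=30,
).images[0]
image.save("qr_monster.png")
```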
neuralmind/bert-large-portuguese-cased
neuralmind
"2021-05-20T01:31:09Z"
113,756
47
transformers
[ "transformers", "pytorch", "jax", "bert", "fill-mask", "pt", "dataset:brWaC", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
--- language: pt license: mit tags: - bert - pytorch datasets: - brWaC --- # BERTimbau Large (aka "bert-large-portuguese-cased") ![Bert holding a berimbau](https://imgur.com/JZ7Hynh.jpg) ## Introduction BERTimbau Large is a pretrained BERT model for Brazilian Portuguese that achieves state-of-the-art performances on three downstream NLP tasks: Named Entity Recognition, Sentence Textual Similarity and Recognizing Textual Entailment. It is available in two sizes: Base and Large. For further information or requests, please go to [BERTimbau repository](https://github.com/neuralmind-ai/portuguese-bert/). ## Available models | Model | Arch. | #Layers | #Params | | ---------------------------------------- | ---------- | ------- | ------- | | `neuralmind/bert-base-portuguese-cased` | BERT-Base | 12 | 110M | | `neuralmind/bert-large-portuguese-cased` | BERT-Large | 24 | 335M | ## Usage ```python from transformers import AutoTokenizer # Or BertTokenizer from transformers import AutoModelForPreTraining # Or BertForPreTraining for loading pretraining heads from transformers import AutoModel # or BertModel, for BERT without pretraining heads model = AutoModelForPreTraining.from_pretrained('neuralmind/bert-large-portuguese-cased') tokenizer = AutoTokenizer.from_pretrained('neuralmind/bert-large-portuguese-cased', do_lower_case=False) ``` ### Masked language modeling prediction example ```python from transformers import pipeline pipe = pipeline('fill-mask', model=model, tokenizer=tokenizer) pipe('Tinha uma [MASK] no meio do caminho.') # [{'score': 0.5054386258125305, # 'sequence': '[CLS] Tinha uma pedra no meio do caminho. [SEP]', # 'token': 5028, # 'token_str': 'pedra'}, # {'score': 0.05616172030568123, # 'sequence': '[CLS] Tinha uma curva no meio do caminho. [SEP]', # 'token': 9562, # 'token_str': 'curva'}, # {'score': 0.02348282001912594, # 'sequence': '[CLS] Tinha uma parada no meio do caminho. [SEP]', # 'token': 6655, # 'token_str': 'parada'}, # {'score': 0.01795753836631775, # 'sequence': '[CLS] Tinha uma mulher no meio do caminho. [SEP]', # 'token': 2606, # 'token_str': 'mulher'}, # {'score': 0.015246033668518066, # 'sequence': '[CLS] Tinha uma luz no meio do caminho. [SEP]', # 'token': 3377, # 'token_str': 'luz'}] ``` ### For BERT embeddings ```python import torch model = AutoModel.from_pretrained('neuralmind/bert-large-portuguese-cased') input_ids = tokenizer.encode('Tinha uma pedra no meio do caminho.', return_tensors='pt') with torch.no_grad(): outs = model(input_ids) encoded = outs[0][0, 1:-1] # Ignore [CLS] and [SEP] special tokens # encoded.shape: (8, 1024) # tensor([[ 1.1872, 0.5606, -0.2264, ..., 0.0117, -0.1618, -0.2286], # [ 1.3562, 0.1026, 0.1732, ..., -0.3855, -0.0832, -0.1052], # [ 0.2988, 0.2528, 0.4431, ..., 0.2684, -0.5584, 0.6524], # ..., # [ 0.3405, -0.0140, -0.0748, ..., 0.6649, -0.8983, 0.5802], # [ 0.1011, 0.8782, 0.1545, ..., -0.1768, -0.8880, -0.1095], # [ 0.7912, 0.9637, -0.3859, ..., 0.2050, -0.1350, 0.0432]]) ``` ## Citation If you use our work, please cite: ```bibtex @inproceedings{souza2020bertimbau, author = {F{\'a}bio Souza and Rodrigo Nogueira and Roberto Lotufo}, title = {{BERT}imbau: pretrained {BERT} models for {B}razilian {P}ortuguese}, booktitle = {9th Brazilian Conference on Intelligent Systems, {BRACIS}, Rio Grande do Sul, Brazil, October 20-23 (to appear)}, year = {2020} } ```
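As a small extension of the embeddings example above (not part of the original card), a common way to turn the per-token embeddings into a single sentence vector is mean pooling:

```python
# A minimal sketch, not from the original card: mean-pool the token embeddings
# computed above into one 1024-dimensional sentence vector.
sentence_embedding = encoded.mean(dim=0)
print(sentence_embedding.shape)  # torch.Size([1024])
```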
ckpt/sd15
ckpt
"2023-07-05T16:18:39Z"
113,697
1
diffusers
[ "diffusers", "license:openrail", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
null
"2022-10-21T03:51:16Z"
--- license: openrail ---
google/owlvit-base-patch16
google
"2023-12-12T13:41:12Z"
113,416
9
transformers
[ "transformers", "pytorch", "owlvit", "zero-shot-object-detection", "vision", "arxiv:2205.06230", "license:apache-2.0", "has_space", "region:us" ]
zero-shot-object-detection
"2022-07-05T07:12:33Z"
--- license: apache-2.0 tags: - vision - zero-shot-object-detection inference: false --- # Model Card: OWL-ViT ## Model Details The OWL-ViT (short for Vision Transformer for Open-World Localization) was proposed in [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. OWL-ViT is a zero-shot text-conditioned object detection model that can be used to query an image with one or multiple text queries. OWL-ViT uses CLIP as its multi-modal backbone, with a ViT-like Transformer to get visual features and a causal language model to get the text features. To use CLIP for detection, OWL-ViT removes the final token pooling layer of the vision model and attaches a lightweight classification and box head to each transformer output token. Open-vocabulary classification is enabled by replacing the fixed classification layer weights with the class-name embeddings obtained from the text model. The authors first train CLIP from scratch and fine-tune it end-to-end with the classification and box heads on standard detection datasets using a bipartite matching loss. One or multiple text queries per image can be used to perform zero-shot text-conditioned object detection. ### Model Date May 2022 ### Model Type The model uses a CLIP backbone with a ViT-B/16 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. The CLIP backbone is trained from scratch and fine-tuned together with the box and class prediction heads with an object detection objective. ### Documents - [OWL-ViT Paper](https://arxiv.org/abs/2205.06230) ### Use with Transformers ```python3 import requests from PIL import Image import torch from transformers import OwlViTProcessor, OwlViTForObjectDetection processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) texts = [["a photo of a cat", "a photo of a dog"]] inputs = processor(text=texts, images=image, return_tensors="pt") outputs = model(**inputs) # Target image sizes (height, width) to rescale box predictions [batch_size, 2] target_sizes = torch.Tensor([image.size[::-1]]) # Convert outputs (bounding boxes and class logits) to COCO API results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes) i = 0 # Retrieve predictions for the first image for the corresponding text queries text = texts[i] boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"] # Print detected objects and rescaled box coordinates for box, score, label in zip(boxes, scores, labels): box = [round(i, 2) for i in box.tolist()] print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}") ``` ## Model Use ### Intended Use The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, text-conditioned object detection. 
We also hope it can be used for interdisciplinary studies of the potential impact of such models, especially in areas that commonly require identifying objects whose label is unavailable during training. #### Primary intended uses The primary intended users of these models are AI researchers. We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. ## Data The CLIP backbone of the model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet. The prediction heads of OWL-ViT, along with the CLIP backbone, are fine-tuned on publicly available object detection datasets such as [COCO](https://cocodataset.org/#home) and [OpenImages](https://storage.googleapis.com/openimages/web/index.html). ### BibTeX entry and citation info ```bibtex @article{minderer2022simple, title={Simple Open-Vocabulary Object Detection with Vision Transformers}, author={Matthias Minderer and Alexey Gritsenko and Austin Stone and Maxim Neumann and Dirk Weissenborn and Alexey Dosovitskiy and Aravindh Mahendran and Anurag Arnab and Mostafa Dehghani and Zhuoran Shen and Xiao Wang and Xiaohua Zhai and Thomas Kipf and Neil Houlsby}, journal={arXiv preprint arXiv:2205.06230}, year={2022} } ```
cagliostrolab/animagine-xl-3.1
cagliostrolab
"2024-03-18T11:11:14Z"
113,416
306
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "stable-diffusion-xl", "en", "base_model:cagliostrolab/animagine-xl-3.0", "license:other", "endpoints_compatible", "has_space", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-03-13T09:40:48Z"
---
license: other
license_name: faipl-1.0-sd
license_link: https://freedevproject.org/faipl-1.0-sd/
language:
- en
tags:
- text-to-image
- stable-diffusion
- safetensors
- stable-diffusion-xl
base_model: cagliostrolab/animagine-xl-3.0
widget:
- text: 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality, very aesthetic, absurdres
  parameter:
    negative_prompt: nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]
  example_title: 1girl
- text: 1boy, male focus, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality, very aesthetic, absurdres
  parameter:
    negative_prompt: nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]
  example_title: 1boy
---

<style>
.title-container {
  display: flex;
  justify-content: center;
  align-items: center;
  height: 100vh; /* Adjust this value to position the title vertically */
}
.title {
  font-size: 2.5em;
  text-align: center;
  color: #333;
  font-family: 'Helvetica Neue', sans-serif;
  text-transform: uppercase;
  letter-spacing: 0.1em;
  padding: 0.5em 0;
  background: transparent;
}
.title span {
  background: -webkit-linear-gradient(45deg, #7ed56f, #28b485);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
}
.custom-table {
  table-layout: fixed;
  width: 100%;
  border-collapse: collapse;
  margin-top: 2em;
}
.custom-table td {
  width: 50%;
  vertical-align: top;
  padding: 10px;
  box-shadow: 0px 0px 0px 0px rgba(0, 0, 0, 0.15);
}
.custom-image-container {
  position: relative;
  width: 100%;
  margin-bottom: 0em;
  overflow: hidden;
  border-radius: 10px;
  transition: transform .7s; /* Smooth transition for the container */
}
.custom-image-container:hover {
  transform: scale(1.05); /* Scale the container on hover */
}
.custom-image {
  width: 100%;
  height: auto;
  object-fit: cover;
  border-radius: 10px;
  transition: transform .7s;
  margin-bottom: 0em;
}
.nsfw-filter {
  filter: blur(8px); /* Apply a blur effect */
  transition: filter 0.3s ease; /* Smooth transition for the blur effect */
}
.custom-image-container:hover .nsfw-filter {
  filter: none; /* Remove the blur effect on hover */
}
.overlay {
  position: absolute;
  bottom: 0;
  left: 0;
  right: 0;
  color: white;
  width: 100%;
  height: 40%;
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
  font-size: 1vw;
  font-weight: bold;
  text-align: center;
  opacity: 0; /* Hidden by default; revealed on hover */
  background: linear-gradient(0deg, rgba(0, 0, 0, 0.8) 60%, rgba(0, 0, 0, 0) 100%);
  transition: opacity .5s;
}
.custom-image-container:hover .overlay {
  opacity: 1;
}
.overlay-text {
  background: linear-gradient(45deg, #7ed56f, #28b485);
  -webkit-background-clip: text;
  color: transparent;
  text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.7);
}
.overlay-subtext {
  font-size: 0.75em;
  margin-top: 0.5em;
  font-style: italic;
}
.overlay, .overlay-subtext {
  text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.5);
}
</style>

<h1 class="title">
  <span>Animagine XL 3.1</span>
</h1>

<table class="custom-table">
  <tr>
    <td>
      <div class="custom-image-container">
        <img class="custom-image"
src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/yq_5AWegnLsGyCYyqJ-1G.png" alt="sample1">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/sp6w1elvXVTbckkU74v3o.png" alt="sample2">
      </div>
    </td>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/OYBuX1XzffN7Pxi4c75JV.png" alt="sample3">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/ytT3Oaf-atbqrnPIqz_dq.png" alt="sample4">
      </div>
    </td>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/0oRq204okFxRGECmrIK6d.png" alt="sample5">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/DW51m0HlDuAlXwu8H8bIS.png" alt="sample6">
      </div>
    </td>
  </tr>
</table>

**Animagine XL 3.1** is an update in the Animagine XL V3 series, enhancing the previous version, Animagine XL 3.0. This open-source, anime-themed text-to-image model has been improved for generating anime-style images with higher quality. It includes a broader range of characters from well-known anime series, an optimized dataset, and new aesthetic tags for better image creation. Built on Stable Diffusion XL, Animagine XL 3.1 aims to be a valuable resource for anime fans, artists, and content creators by producing accurate and detailed representations of anime characters.

## Model Details

- **Developed by**: [Cagliostro Research Lab](https://huggingface.co/cagliostrolab)
- **In collaboration with**: [SeaArt.ai](https://www.seaart.ai/)
- **Model type**: Diffusion-based text-to-image generative model
- **Model Description**: Animagine XL 3.1 generates high-quality anime images from textual prompts. It boasts enhanced hand anatomy, improved concept understanding, and advanced prompt interpretation.
- **License**: [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/) - **Fine-tuned from**: [Animagine XL 3.0](https://huggingface.co/cagliostrolab/animagine-xl-3.0) ## Gradio & Colab Integration Try the demo powered by Gradio in Huggingface Spaces: [![Open In Spaces](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue.svg)](https://huggingface.co/spaces/cagliostrolab/animagine-xl-3.1) Or open the demo in Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/#fileId=https%3A//huggingface.co/spaces/cagliostrolab/animagine-xl-3.1/blob/main/demo.ipynb) ## 🧨 Diffusers Installation First install the required libraries: ```bash pip install diffusers transformers accelerate safetensors --upgrade ``` Then run image generation with the following example code: ```python import torch from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained( "cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, ) pipe.to('cuda') prompt = "1girl, souryuu asuka langley, neon genesis evangelion, solo, upper body, v, smile, looking at viewer, outdoors, night" negative_prompt = "nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]" image = pipe( prompt, negative_prompt=negative_prompt, width=832, height=1216, guidance_scale=7, num_inference_steps=28 ).images[0] image.save("./output/asuka_test.png") ``` ## Usage Guidelines ### Tag Ordering For optimal results, it's recommended to follow the structured prompt template because we train the model like this: ``` 1girl/1boy, character name, from what series, everything else in any order. ``` ## Special Tags Animagine XL 3.1 utilizes special tags to steer the result toward quality, rating, creation date and aesthetic. While the model can generate images without these tags, using them can help achieve better results. ### Quality Modifiers Quality tags now consider both scores and post ratings to ensure a balanced quality distribution. We've refined labels for greater clarity, such as changing 'high quality' to 'great quality'. | Quality Modifier | Score Criterion | |------------------|-------------------| | `masterpiece` | > 95% | | `best quality` | > 85% & ≤ 95% | | `great quality` | > 75% & ≤ 85% | | `good quality` | > 50% & ≤ 75% | | `normal quality` | > 25% & ≤ 50% | | `low quality` | > 10% & ≤ 25% | | `worst quality` | ≤ 10% | ### Rating Modifiers We've also streamlined our rating tags for simplicity and clarity, aiming to establish global rules that can be applied across different models. For example, the tag 'rating: general' is now simply 'general', and 'rating: sensitive' has been condensed to 'sensitive'. | Rating Modifier | Rating Criterion | |-------------------|------------------| | `safe` | General | | `sensitive` | Sensitive | | `nsfw` | Questionable | | `explicit, nsfw` | Explicit | ### Year Modifier We've also redefined the year range to steer results towards specific modern or vintage anime art styles more accurately. This update simplifies the range, focusing on relevance to current and past eras. 
| Year Tag | Year Range |
|----------|------------------|
| `newest` | 2021 to 2024 |
| `recent` | 2018 to 2020 |
| `mid` | 2015 to 2017 |
| `early` | 2011 to 2014 |
| `oldest` | 2005 to 2010 |

### Aesthetic Tags

We've enhanced our tagging system with aesthetic tags to refine content categorization based on visual appeal. These tags are derived from evaluations made by a specialized ViT (Vision Transformer) image classification model, specifically trained on anime data. For this purpose, we utilized the model [shadowlilac/aesthetic-shadow-v2](https://huggingface.co/shadowlilac/aesthetic-shadow-v2), which assesses the aesthetic value of content before it undergoes training. This ensures that each piece of content is not only relevant and accurate but also visually appealing.

| Aesthetic Tag | Score Range |
|-------------------|-------------------|
| `very aesthetic` | > 0.71 |
| `aesthetic` | > 0.45 & < 0.71 |
| `displeasing` | > 0.27 & < 0.45 |
| `very displeasing`| ≤ 0.27 |

## Recommended settings

To guide the model towards generating high-aesthetic images, use negative prompts like:

```
nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]
```

For higher quality outcomes, prepend prompts with:

```
masterpiece, best quality, very aesthetic, absurdres
```

It's recommended to use a lower classifier-free guidance (CFG Scale) of around 5-7, sampling steps below 30, and Euler Ancestral (Euler a) as the sampler; a code sketch applying these settings follows the training overview below.

### Multi Aspect Resolution

This model supports generating images at the following dimensions:

| Dimensions | Aspect Ratio |
|-------------------|-----------------|
| `1024 x 1024` | 1:1 Square |
| `1152 x 896` | 9:7 |
| `896 x 1152` | 7:9 |
| `1216 x 832` | 19:13 |
| `832 x 1216` | 13:19 |
| `1344 x 768` | 7:4 Horizontal |
| `768 x 1344` | 4:7 Vertical |
| `1536 x 640` | 12:5 Horizontal |
| `640 x 1536` | 5:12 Vertical |

## Training and Hyperparameters

**Animagine XL 3.1** was trained on 2x A100 80GB GPUs for approximately 15 days, totaling over 350 GPU hours. The training process consisted of three stages:
- **Pretraining**: Utilized a data-rich collection of 870k ordered and tagged images to increase Animagine XL 3.0's model knowledge.
- **Finetuning - First Stage**: Employed labeled and curated aesthetic datasets to refine the broken U-Net after pretraining.
- **Finetuning - Second Stage**: Utilized labeled and curated aesthetic datasets to refine the model's art style and improve hand and anatomy rendering.
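A minimal 🧨 Diffusers sketch applying the recommended settings above is shown here. The mapping of "Euler a" to diffusers' `EulerAncestralDiscreteScheduler`, and the specific CFG and step values, are illustrative assumptions within the recommended ranges rather than official settings:

```python
import torch
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.1",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
# "Euler a" in most UIs corresponds to the Euler Ancestral scheduler in diffusers
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

prompt = "1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality, very aesthetic, absurdres"
negative_prompt = "nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    width=832,               # a supported 13:19 resolution from the table above
    height=1216,
    guidance_scale=6,        # within the recommended CFG range of 5-7
    num_inference_steps=28,  # below the recommended ceiling of 30 steps
).images[0]
image.save("sample.png")
```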
### Hyperparameters | Stage | Epochs | UNet lr | Train Text Encoder | Batch Size | Noise Offset | Optimizer | LR Scheduler | Grad Acc Steps | GPUs | |--------------------------|--------|---------|--------------------|------------|--------------|------------|-------------------------------|----------------|------| | **Pretraining** | 10 | 1e-5 | True | 16 | N/A | AdamW | Cosine Annealing Warm Restart | 3 | 2 | | **Finetuning 1st Stage** | 10 | 2e-6 | False | 48 | 0.0357 | Adafactor | Constant with Warmup | 1 | 1 | | **Finetuning 2nd Stage** | 15 | 1e-6 | False | 48 | 0.0357 | Adafactor | Constant with Warmup | 1 | 1 | ## Model Comparison (Pretraining only) ### Training Config | Configuration Item | Animagine XL 3.0 | Animagine XL 3.1 | |---------------------------------|------------------------------------------|------------------------------------------------| | **GPU** | 2 x A100 80G | 2 x A100 80G | | **Dataset** | 1,271,990 | 873,504 | | **Shuffle Separator** | True | True | | **Num Epochs** | 10 | 10 | | **Learning Rate** | 7.5e-6 | 1e-5 | | **Text Encoder Learning Rate** | 3.75e-6 | 1e-5 | | **Effective Batch Size** | 48 x 1 x 2 | 16 x 3 x 2 | | **Optimizer** | Adafactor | AdamW | | **Optimizer Args** | Scale Parameter: False, Relative Step: False, Warmup Init: False | Weight Decay: 0.1, Betas: (0.9, 0.99) | | **LR Scheduler** | Constant with Warmup | Cosine Annealing Warm Restart | | **LR Scheduler Args** | Warmup Steps: 100 | Num Cycles: 10, Min LR: 1e-6, LR Decay: 0.9, First Cycle Steps: 9,099 | Source code and training config are available here: https://github.com/cagliostrolab/sd-scripts/tree/main/notebook ### Acknowledgements The development and release of Animagine XL 3.1 would not have been possible without the invaluable contributions and support from the following individuals and organizations: - **[SeaArt.ai](https://www.seaart.ai/)**: Our collaboration partner and sponsor. - **[Shadow Lilac](https://huggingface.co/shadowlilac)**: For providing the aesthetic classification model, [aesthetic-shadow-v2](https://huggingface.co/shadowlilac/aesthetic-shadow-v2). - **[Derrian Distro](https://github.com/derrian-distro)**: For their custom learning rate scheduler, adapted from [LoRA Easy Training Scripts](https://github.com/derrian-distro/LoRA_Easy_Training_Scripts/blob/main/custom_scheduler/LoraEasyCustomOptimizer/CustomOptimizers.py). - **[Kohya SS](https://github.com/kohya-ss)**: For their comprehensive training scripts. - **Cagliostrolab Collaborators**: For their dedication to model training, project management, and data curation. - **Early Testers**: For their valuable feedback and quality assurance efforts. - **NovelAI**: For their innovative approach to aesthetic tagging, which served as an inspiration for our implementation. - **KBlueLeaf**: For providing inspiration in balancing quality tags distribution and managing tags based on [Hakubooru Metainfo](https://github.com/KohakuBlueleaf/HakuBooru/blob/main/hakubooru/metainfo.py) Thank you all for your support and expertise in pushing the boundaries of anime-style image generation. 
## Collaborators - [Linaqruf](https://huggingface.co/Linaqruf) - [ItsMeBell](https://huggingface.co/ItsMeBell) - [Asahina2K](https://huggingface.co/Asahina2K) - [DamarJati](https://huggingface.co/DamarJati) - [Zwicky18](https://huggingface.co/Zwicky18) - [Scipius2121](https://huggingface.co/Scipius2121) - [Raelina](https://huggingface.co/Raelina) - [Kayfahaarukku](https://huggingface.co/kayfahaarukku) - [Kriz](https://huggingface.co/Kr1SsSzz) ## Limitations While Animagine XL 3.1 represents a significant advancement in anime-style image generation, it is important to acknowledge its limitations: 1. **Anime-Focused**: This model is specifically designed for generating anime-style images and is not suitable for creating realistic photos. 2. **Prompt Complexity**: This model may not be suitable for users who expect high-quality results from short or simple prompts. The training focus was on concept understanding rather than aesthetic refinement, which may require more detailed and specific prompts to achieve the desired output. 3. **Prompt Format**: Animagine XL 3.1 is optimized for Danbooru-style tags rather than natural language prompts. For best results, users are encouraged to format their prompts using the appropriate tags and syntax. 4. **Anatomy and Hand Rendering**: Despite the improvements made in anatomy and hand rendering, there may still be instances where the model produces suboptimal results in these areas. 5. **Dataset Size**: The dataset used for training Animagine XL 3.1 consists of approximately 870,000 images. When combined with the previous iteration's dataset (1.2 million), the total training data amounts to around 2.1 million images. While substantial, this dataset size may still be considered limited in scope for an "ultimate" anime model. 6. **NSFW Content**: Animagine XL 3.1 has been designed to generate more balanced NSFW content. However, it is important to note that the model may still produce NSFW results, even if not explicitly prompted. By acknowledging these limitations, we aim to provide transparency and set realistic expectations for users of Animagine XL 3.1. Despite these constraints, we believe that the model represents a significant step forward in anime-style image generation and offers a powerful tool for artists, designers, and enthusiasts alike. ## License Based on Animagine XL 3.0, Animagine XL 3.1 falls under [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/) license, which is compatible with Stable Diffusion models’ license. Key points: 1. **Modification Sharing:** If you modify Animagine XL 3.1, you must share both your changes and the original license. 2. **Source Code Accessibility:** If your modified version is network-accessible, provide a way (like a download link) for others to get the source code. This applies to derived models too. 3. **Distribution Terms:** Any distribution must be under this license or another with similar rules. 4. **Compliance:** Non-compliance must be fixed within 30 days to avoid license termination, emphasizing transparency and adherence to open-source values. The choice of this license aims to keep Animagine XL 3.1 open and modifiable, aligning with open source community spirit. It protects contributors and users, encouraging a collaborative, ethical open-source community. This ensures the model not only benefits from communal input but also respects open-source development freedoms. 
## Cagliostro Lab Discord Server

The Cagliostro Lab Discord server is finally open to the public: https://discord.gg/cqh9tZgbGc

Feel free to join our Discord server!
vikhyatk/moondream1
vikhyatk
"2024-02-07T02:57:53Z"
113,371
462
transformers
[ "transformers", "pytorch", "safetensors", "moondream1", "text-generation", "custom_code", "en", "autotrain_compatible", "has_space", "region:us" ]
text-generation
"2024-01-20T18:10:04Z"
---
language:
- en
---

# 🌔 moondream1

1.6B parameter model built by [@vikhyatk](https://x.com/vikhyatk) using SigLIP, Phi-1.5 and the LLaVa training dataset. The model is released for research purposes only; commercial use is not allowed.

Try it out on [Huggingface Spaces](https://huggingface.co/spaces/vikhyatk/moondream1)!

**Usage**

```
pip install transformers timm einops
```

```python
from transformers import AutoModelForCausalLM, CodeGenTokenizerFast as Tokenizer
from PIL import Image

model_id = "vikhyatk/moondream1"
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
tokenizer = Tokenizer.from_pretrained(model_id)

image = Image.open('<IMAGE_PATH>')
enc_image = model.encode_image(image)
print(model.answer_question(enc_image, "<QUESTION>", tokenizer))
```

## Benchmarks

| Model | Parameters | VQAv2 | GQA | TextVQA |
| --- | --- | --- | --- | --- |
| LLaVA-1.5 | 13.3B | 80.0 | 63.3 | 61.3 |
| LLaVA-1.5 | 7.3B | 78.5 | 62.0 | 58.2 |
| **moondream1** | 1.6B | 74.7 | 57.9 | 35.6 |

## Examples

| Image | Examples |
| --- | --- |
| ![](assets/demo-1.jpg) | **What is the title of this book?**<br>The Little Book of Deep Learning<br><br>**What can you tell me about this book?**<br>The book in the image is titled "The Little Book of Deep Learning." It appears to be a guide or manual that provides information and instructions on the subject of deep learning. The book is described as being small and thin, making it easy to carry and read. It is set in a room with a view outside, suggesting that the person holding the book might be in a living or working space with a view. The content of the book is likely to be complex, as the title implies a focus on advanced concepts in deep learning.<br><br>**Where is the person standing?**<br> The person is standing on a balcony or deck, which is outside. |
| ![](assets/demo-2.jpg) | **What type of food is the girl holding?**<br>The girl is holding a hamburger.<br><br>**What color is the woman's hair?**<br>The woman's hair is white.<br><br>**What is the girl doing in the image?**<br>The girl in the image is eating a hamburger. |
| ![](assets/demo-3.jpg) | **What color is the train?**<br>The train is red.<br><br>**What is the bus doing in the image?**<br> The bus in the image is driving down a city street, passing through an intersection.<br><br>**What kind of public transportation is in the image?**<br>The image features a red train or trolley as the public transportation option. |
| ![](assets/demo-4.jpg) | **What is the girl looking at?**<br>The girl is looking at her reflection in the mirror.<br><br>**What color is the woman's hair?**<br>The woman's hair is brown.<br><br>**What is the woman doing in the mirror?**<br>The woman is adjusting her hair in the mirror. |
| ![](assets/demo-5.jpg) | **What might be the reason for the dog's aggressive behavior?**<br>The dog's aggressive behavior, including its growling, baring its teeth, and showing its sharp teeth, could be due to various reasons. It is possible that the dog is feeling threatened, scared, or defensive about something in its environment, such as another animal, a loud noise, or a person nearby. Alternatively, the dog might be reacting to a command it received from its owner, or it could be experiencing discomfort or pain, which is causing it to display aggressive behavior.
It is essential to observe the dog's body language and context to determine the underlying cause of its aggressive behavior and respond accordingly to address its needs or concerns.<br><br>**What kind of dog is in the picture?**<br>The dog in the picture is a small, black and white chihuahua. |
EleutherAI/gpt-neo-2.7B
EleutherAI
"2023-07-09T15:52:52Z"
112,788
387
transformers
[ "transformers", "pytorch", "jax", "rust", "safetensors", "gpt_neo", "text-generation", "text generation", "causal-lm", "en", "dataset:EleutherAI/pile", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-generation
"2022-03-02T23:29:04Z"
---
language:
- en
tags:
- text generation
- pytorch
- causal-lm
license: mit
datasets:
- EleutherAI/pile
---

# GPT-Neo 2.7B

## Model Description

GPT-Neo 2.7B is a transformer model designed using EleutherAI's replication of the GPT-3 architecture. GPT-Neo refers to the class of models, while 2.7B represents the number of parameters of this particular pre-trained model.

## Training data

GPT-Neo 2.7B was trained on the Pile, a large-scale curated dataset created by EleutherAI for the purpose of training this model.

## Training procedure

This model was trained for 420 billion tokens over 400,000 steps. It was trained as an autoregressive language model, using cross-entropy loss.

## Intended Use and Limitations

Trained in this way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. However, the model is best at what it was pretrained for, which is generating text from a prompt.

### How to use

You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run:

```py
>>> from transformers import pipeline
>>> generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')
>>> generator("EleutherAI has", do_sample=True, min_length=50)

[{'generated_text': 'EleutherAI has made a commitment to create new software packages for each of its major clients and has'}]
```

### Limitations and Biases

GPT-Neo was trained as an autoregressive language model. This means that its core functionality is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work.

GPT-Neo was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending on your use case, GPT-Neo may produce socially unacceptable text. See Sections 5 and 6 of the Pile paper for a more detailed analysis of the biases in the Pile.

As with all language models, it is hard to predict in advance how GPT-Neo will respond to particular prompts, and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results.

## Eval results

All evaluations were done using our [evaluation harness](https://github.com/EleutherAI/lm-evaluation-harness). Some results for GPT-2 and GPT-3 are inconsistent with the values reported in the respective papers. We are currently looking into why, and would greatly appreciate feedback and further testing of our eval harness. If you would like to contribute evaluations you have done, please reach out on our [Discord](https://discord.gg/vtRgjbM).
### Linguistic Reasoning

| Model and Size | Pile BPB | Pile PPL | Wikitext PPL | Lambada PPL | Lambada Acc | Winogrande | Hellaswag |
| ---------------- | ---------- | ---------- | ------------- | ----------- | ----------- | ---------- | ----------- |
| GPT-Neo 1.3B | 0.7527 | 6.159 | 13.10 | 7.498 | 57.23% | 55.01% | 38.66% |
| GPT-2 1.5B | 1.0468 | ----- | 17.48 | 10.634 | 51.21% | 59.40% | 40.03% |
| **GPT-Neo 2.7B** | **0.7165** | **5.646** | **11.39** | **5.626** | **62.22%** | **56.50%** | **42.73%** |
| GPT-3 Ada | 0.9631 | ----- | ----- | 9.954 | 51.60% | 52.90% | 35.93% |

### Physical and Scientific Reasoning

| Model and Size | MathQA | PubMedQA | Piqa |
| ---------------- | ---------- | ---------- | ----------- |
| GPT-Neo 1.3B | 24.05% | 54.40% | 71.11% |
| GPT-2 1.5B | 23.64% | 58.33% | 70.78% |
| **GPT-Neo 2.7B** | **24.72%** | **57.54%** | **72.14%** |
| GPT-3 Ada | 24.29% | 52.80% | 68.88% |

### Down-Stream Applications

TBD

### BibTeX entry and citation info

To cite this model, use

```bibtex
@software{gpt-neo,
  author       = {Black, Sid and Gao, Leo and Wang, Phil and Leahy, Connor and Biderman, Stella},
  title        = {{GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow}},
  month        = mar,
  year         = 2021,
  note         = {{If you use this software, please cite it using these metadata.}},
  publisher    = {Zenodo},
  version      = {1.0},
  doi          = {10.5281/zenodo.5297715},
  url          = {https://doi.org/10.5281/zenodo.5297715}
}

@article{gao2020pile,
  title={The Pile: An 800GB Dataset of Diverse Text for Language Modeling},
  author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and others},
  journal={arXiv preprint arXiv:2101.00027},
  year={2020}
}
```
JackFram/llama-68m
JackFram
"2024-01-04T09:26:53Z"
112,683
12
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-19T02:20:03Z"
--- license: apache-2.0 ---
sentence-transformers/paraphrase-MiniLM-L3-v2
sentence-transformers
"2024-03-27T12:09:47Z"
112,530
16
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "safetensors", "bert", "feature-extraction", "sentence-similarity", "transformers", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:s2orc", "dataset:ms_marco", "dataset:wiki_atomic_edits", "dataset:snli", "dataset:multi_nli", "dataset:embedding-data/altlex", "dataset:embedding-data/simple-wiki", "dataset:embedding-data/flickr30k-captions", "dataset:embedding-data/coco_captions", "dataset:embedding-data/sentence-compression", "dataset:embedding-data/QQP", "dataset:yahoo_answers_topics", "arxiv:1908.10084", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
---
license: apache-2.0
library_name: sentence-transformers
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
datasets:
- flax-sentence-embeddings/stackexchange_xml
- s2orc
- ms_marco
- wiki_atomic_edits
- snli
- multi_nli
- embedding-data/altlex
- embedding-data/simple-wiki
- embedding-data/flickr30k-captions
- embedding-data/coco_captions
- embedding-data/sentence-compression
- embedding-data/QQP
- yahoo_answers_topics
pipeline_tag: sentence-similarity
---

# sentence-transformers/paraphrase-MiniLM-L3-v2

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L3-v2')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/paraphrase-MiniLM-L3-v2')
model = AutoModel.from_pretrained('sentence-transformers/paraphrase-MiniLM-L3-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/paraphrase-MiniLM-L3-v2)

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

This model was trained by [sentence-transformers](https://www.sbert.net/).
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
TheBloke/Llama-2-7B-Chat-GGUF
TheBloke
"2023-10-14T21:36:33Z"
112,412
354
transformers
[ "transformers", "gguf", "llama", "facebook", "meta", "pytorch", "llama-2", "text-generation", "en", "arxiv:2307.09288", "base_model:meta-llama/Llama-2-7b-chat-hf", "license:llama2", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-09-04T16:38:41Z"
---
language:
- en
license: llama2
tags:
- facebook
- meta
- pytorch
- llama
- llama-2
model_name: Llama 2 7B Chat
arxiv: 2307.09288
base_model: meta-llama/Llama-2-7b-chat-hf
inference: false
model_creator: Meta Llama 2
model_type: llama
pipeline_tag: text-generation
prompt_template: '[INST] <<SYS>>

  You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don''t know the answer to a question, please don''t share false information.

  <</SYS>>

  {prompt}[/INST]

  '
quantized_by: TheBloke
---

<!-- header start -->
<!-- 200823 -->
<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
    <div style="display: flex; flex-direction: column; align-items: flex-start;">
        <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p>
    </div>
    <div style="display: flex; flex-direction: column; align-items: flex-end;">
        <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p>
    </div>
</div>
<div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div>
<hr style="margin-top: 1.0em; margin-bottom: 1.0em;">
<!-- header end -->

# Llama 2 7B Chat - GGUF
- Model creator: [Meta Llama 2](https://huggingface.co/meta-llama)
- Original model: [Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)

<!-- description start -->
## Description

This repo contains GGUF format model files for [Meta Llama 2's Llama 2 7B Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf).

<!-- description end -->

<!-- README_GGUF.md-about-gguf start -->
### About GGUF

GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.

GGUF offers numerous advantages over GGML, such as better tokenisation, and support for special tokens. It also supports metadata, and is designed to be extensible.

Here is an incomplete list of clients and libraries that are known to support GGUF:

* [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option.
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling.
* [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection.
* [Faraday.dev](https://faraday.dev/), an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.

<!-- README_GGUF.md-about-gguf end -->

<!-- repositories-available start -->
## Repositories available

* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Llama-2-7b-Chat-AWQ)
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF)
* [Meta Llama 2's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
<!-- repositories-available end -->

<!-- prompt-template start -->
## Prompt template: Llama-2-Chat

```
[INST] <<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
<</SYS>>
{prompt}[/INST]
```

<!-- prompt-template end -->

<!-- compatibility_gguf start -->
## Compatibility

These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)

They are also compatible with many third party UIs and libraries - please see the list at the top of this README.

## Explanation of quantisation methods

<details>
  <summary>Click to see details</summary>

The new methods available are:

* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw

Refer to the Provided Files table below to see what files use which methods, and how.
</details>
<!-- compatibility_gguf end -->

<!-- README_GGUF.md-provided-files start -->
## Provided files

| Name | Quant method | Bits | Size | Max RAM required | Use case |
| ---- | ---- | ---- | ---- | ---- | ----- |
| [llama-2-7b-chat.Q2_K.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q2_K.gguf) | Q2_K | 2 | 2.83 GB| 5.33 GB | smallest, significant quality loss - not recommended for most purposes |
| [llama-2-7b-chat.Q3_K_S.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q3_K_S.gguf) | Q3_K_S | 3 | 2.95 GB| 5.45 GB | very small, high quality loss |
| [llama-2-7b-chat.Q3_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q3_K_M.gguf) | Q3_K_M | 3 | 3.30 GB| 5.80 GB | very small, high quality loss |
| [llama-2-7b-chat.Q3_K_L.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q3_K_L.gguf) | Q3_K_L | 3 | 3.60 GB| 6.10 GB | small, substantial quality loss |
| [llama-2-7b-chat.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_0.gguf) | Q4_0 | 4 | 3.83 GB| 6.33 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [llama-2-7b-chat.Q4_K_S.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_S.gguf) | Q4_K_S | 4 | 3.86 GB| 6.36 GB | small, greater quality loss |
| [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf) | Q4_K_M | 4 | 4.08 GB| 6.58 GB | medium, balanced quality - recommended |
| [llama-2-7b-chat.Q5_0.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q5_0.gguf) | Q5_0 | 5 | 4.65 GB| 7.15 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [llama-2-7b-chat.Q5_K_S.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q5_K_S.gguf) | Q5_K_S | 5 | 4.65 GB| 7.15 GB | large, low quality loss - recommended |
| [llama-2-7b-chat.Q5_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q5_K_M.gguf) | Q5_K_M | 5 | 4.78 GB| 7.28 GB | large, very low quality loss - recommended |
| [llama-2-7b-chat.Q6_K.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q6_K.gguf) | Q6_K | 6 | 5.53 GB| 8.03 GB | very large, extremely low quality loss |
| [llama-2-7b-chat.Q8_0.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/blob/main/llama-2-7b-chat.Q8_0.gguf) | Q8_0 | 8 | 7.16 GB| 9.66 GB | very large, extremely low quality loss - not recommended |

**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.

<!-- README_GGUF.md-provided-files end -->

<!-- README_GGUF.md-how-to-download start -->
## How to download GGUF files

**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.

The following clients/libraries will automatically download models for you, providing a list of available models to choose from:

- LM Studio
- LoLLMS Web UI
- Faraday.dev

### In `text-generation-webui`

Under Download Model, you can enter the model repo: TheBloke/Llama-2-7b-Chat-GGUF and below it, a specific filename to download, such as: llama-2-7b-chat.Q4_K_M.gguf.

Then click Download.
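If you prefer scripting the download from Python instead of the CLI covered in the next section, here is a minimal sketch using the `huggingface_hub` library; the filename is taken from the Provided Files table above:

```python
from huggingface_hub import hf_hub_download

# Download a single quant file into the current directory (no need to clone the repo)
model_path = hf_hub_download(
    repo_id="TheBloke/Llama-2-7b-Chat-GGUF",
    filename="llama-2-7b-chat.Q4_K_M.gguf",
    local_dir=".",
)
print(f"Model downloaded to: {model_path}")
```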
### On the command line, including multiple files at once

I recommend using the `huggingface-hub` Python library:

```shell
pip3 install 'huggingface-hub>=0.17.1'
```

Then you can download any individual model file to the current directory, at high speed, with a command like this:

```shell
huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
```

<details>
  <summary>More advanced huggingface-cli download usage</summary>

You can also download multiple files at once with a pattern:

```shell
huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
```

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).

To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
```

Windows CLI users: Use `set HF_HUB_ENABLE_HF_TRANSFER=1` before running the download command.
</details>
<!-- README_GGUF.md-how-to-download end -->

<!-- README_GGUF.md-how-to-run start -->
## Example `llama.cpp` command

Make sure you are using `llama.cpp` from commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.

```shell
./main -ngl 32 -m llama-2-7b-chat.Q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n{prompt}[/INST]"
```

Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.

Change `-c 4096` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically.

If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`

For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)

## How to run in `text-generation-webui`

Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).

## How to run from Python code

You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries.
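For llama-cpp-python, a minimal sketch is shown here (assuming the Q4_K_M file from the table above has already been downloaded to the current directory); the ctransformers route follows in the next section:

```python
from llama_cpp import Llama

llm = Llama(
    model_path="./llama-2-7b-chat.Q4_K_M.gguf",
    n_ctx=4096,       # context length
    n_gpu_layers=32,  # number of layers to offload to GPU; set to 0 for CPU-only
)

# Use the Llama-2-Chat prompt template shown earlier in this README
prompt = "[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant.\n<</SYS>>\nWrite a short poem about llamas.[/INST]"
output = llm(prompt, max_tokens=256, temperature=0.7)
print(output["choices"][0]["text"])
```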
### How to load this model from Python using ctransformers

#### First install the package

```bash
# Base ctransformers with no GPU acceleration
pip install 'ctransformers>=0.2.24'
# Or with CUDA GPU acceleration
pip install 'ctransformers[cuda]>=0.2.24'
# Or with ROCm GPU acceleration
CT_HIPBLAS=1 pip install 'ctransformers>=0.2.24' --no-binary ctransformers
# Or with Metal GPU acceleration for macOS systems
CT_METAL=1 pip install 'ctransformers>=0.2.24' --no-binary ctransformers
```

#### Simple example code to load one of these GGUF models

```python
from ctransformers import AutoModelForCausalLM

# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7b-Chat-GGUF", model_file="llama-2-7b-chat.Q4_K_M.gguf", model_type="llama", gpu_layers=50)

print(llm("AI is going to"))
```

## How to use with LangChain

Here are guides on using llama-cpp-python or ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

<!-- README_GGUF.md-how-to-run end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at:

[TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.

**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J.
Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Meta Llama 2's Llama 2 7B Chat # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. 
**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)

**Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](https://arxiv.org/abs/2307.09288)

## Intended Use

**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.

To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).

**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.

## Hardware and Software

**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.

**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta's sustainability program.

||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)|
|---|---|---|---|
|Llama 2 7B|184320|400|31.22|
|Llama 2 13B|368640|400|62.44|
|Llama 2 70B|1720320|400|291.42|
|Total|3311616||539.00|

**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.

## Training Data

**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.

**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.

## Evaluation Results

In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.
|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide/)

## Reporting Issues

Please report any software “bug,” or other problems with the models through one of the following means:
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)

## Llama Model Index

|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
|---|---|---|---|---|
|7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
|13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat-hf)|
|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat-hf)|
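As a supplement to the formatting requirements described under Intended Use: below is a minimal, hedged sketch of how a single-turn chat prompt can be assembled. For the authoritative multi-turn logic (including `BOS`/`EOS` handling), defer to the `chat_completion` reference linked above; the helper name here is illustrative, not part of Meta's API.

```python
# Illustrative helper (not Meta's reference code): build a single-turn
# Llama 2 chat prompt using the [INST] / <<SYS>> tags described above.
# BOS/EOS tokens are normally added by the tokenizer, not the string.
def build_llama2_prompt(system_prompt: str, user_message: str) -> str:
    # strip() guards against the double-space issue mentioned above
    return (
        f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
        f"{user_message.strip()} [/INST]"
    )

print(build_llama2_prompt("You are a helpful assistant.", "Summarize Llama 2 in one line."))
```

<!-- original-model-card end -->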
TheBloke/Mistral-7B-Instruct-v0.2-GPTQ
TheBloke
"2023-12-11T22:46:53Z"
111,327
36
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "finetuned", "conversational", "arxiv:2310.06825", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "autotrain_compatible", "has_space", "text-generation-inference", "4-bit", "region:us" ]
text-generation
"2023-12-11T22:18:46Z"
--- base_model: mistralai/Mistral-7B-Instruct-v0.2 inference: false license: apache-2.0 model_creator: Mistral AI_ model_name: Mistral 7B Instruct v0.2 model_type: mistral pipeline_tag: text-generation prompt_template: '<s>[INST] {prompt} [/INST] ' quantized_by: TheBloke tags: - finetuned --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Mistral 7B Instruct v0.2 - GPTQ - Model creator: [Mistral AI_](https://huggingface.co/mistralai) - Original model: [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) <!-- description start --> # Description This repo contains GPTQ model files for [Mistral AI_'s Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2). Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF) * [Mistral AI_'s original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Mistral ``` <s>[INST] {prompt} [/INST] ``` <!-- prompt-template end --> <!-- README_GPTQ.md-compatible clients start --> ## Known compatible clients / servers GPTQ models are currently supported on Linux (NVidia/AMD) and Windows (NVidia only). macOS users: please use GGUF models. These GPTQ models are known to work in the following inference servers/webuis. - [text-generation-webui](https://github.com/oobabooga/text-generation-webui) - [KoboldAI United](https://github.com/henk717/koboldai) - [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui) - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) This may not be a complete list; if you know of others, please let me know! 
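As a hedged convenience sketch relating to the prompt template above (assuming the bundled tokenizer config carries Mistral's chat template, which recent `transformers` releases expose via `apply_chat_template`), the prompt can also be produced programmatically:

```python
from transformers import AutoTokenizer

# Hedged sketch: produce the Mistral [INST] ... [/INST] prompt via the
# tokenizer's chat template instead of hand-building the string.
# Assumes this repo's tokenizer config includes the chat template.
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.2-GPTQ")
messages = [{"role": "user", "content": "Tell me about AI"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)  # expected to look like: <s>[INST] Tell me about AI [/INST]
```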
<!-- README_GPTQ.md-compatible clients end --> <!-- README_GPTQ.md-provided-files start --> ## Provided files, and GPTQ parameters Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. Most GPTQ files are made with AutoGPTQ. Mistral models are currently made with Transformers. <details> <summary>Explanation of GPTQ parameters</summary> - Bits: The bit size of the quantised model. - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. "None" is the lowest possible value. - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now. - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy. - GPTQ dataset: The calibration dataset used during quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ calibration dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s). - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences. - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama and Mistral models in 4-bit. </details> | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc | | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- | | [main](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ/tree/main) | 4 | 128 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 4096 | 4.16 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 4096 | 4.57 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 4096 | 7.52 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. | | [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 4096 | 7.68 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. 
| | [gptq-8bit-32g-actorder_True](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ/tree/gptq-8bit-32g-actorder_True) | 8 | 32 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 4096 | 8.17 GB | No | 8-bit, with group size 32g and Act Order for maximum inference quality. | | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.1 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 4096 | 4.29 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. | <!-- README_GPTQ.md-provided-files end --> <!-- README_GPTQ.md-download-from-branches start --> ## How to download, including from branches ### In text-generation-webui To download from the `main` branch, enter `TheBloke/Mistral-7B-Instruct-v0.2-GPTQ` in the "Download model" box. To download from another branch, add `:branchname` to the end of the download name, eg `TheBloke/Mistral-7B-Instruct-v0.2-GPTQ:gptq-4bit-32g-actorder_True` ### From the command line I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` To download the `main` branch to a folder called `Mistral-7B-Instruct-v0.2-GPTQ`: ```shell mkdir Mistral-7B-Instruct-v0.2-GPTQ huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.2-GPTQ --local-dir Mistral-7B-Instruct-v0.2-GPTQ --local-dir-use-symlinks False ``` To download from a different branch, add the `--revision` parameter: ```shell mkdir Mistral-7B-Instruct-v0.2-GPTQ huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.2-GPTQ --revision gptq-4bit-32g-actorder_True --local-dir Mistral-7B-Instruct-v0.2-GPTQ --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> If you remove the `--local-dir-use-symlinks False` parameter, the files will instead be stored in the central Hugging Face cache directory (default location on Linux is: `~/.cache/huggingface`), and symlinks will be added to the specified `--local-dir`, pointing to their real location in the cache. This allows for interrupted downloads to be resumed, and allows you to quickly clone the repo to multiple places on disk without triggering a download again. The downside, and the reason why I don't list that as the default option, is that the files are then hidden away in a cache folder and it's harder to know where your disk space is being used, and to clear it up if/when you want to remove a download model. The cache location can be changed with the `HF_HOME` environment variable, and/or the `--cache-dir` parameter to `huggingface-cli`. For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell mkdir Mistral-7B-Instruct-v0.2-GPTQ HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.2-GPTQ --local-dir Mistral-7B-Instruct-v0.2-GPTQ --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. 
</details> ### With `git` (**not** recommended) To clone a specific branch with `git`, use a command like this: ```shell git clone --single-branch --branch gptq-4bit-32g-actorder_True https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ ``` Note that using Git with HF repos is strongly discouraged. It will be much slower than using `huggingface-hub`, and will use twice as much disk space as it has to store the model files twice (it stores every byte both in the intended target folder, and again in the `.git` folder as a blob.) <!-- README_GPTQ.md-download-from-branches end --> <!-- README_GPTQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui) Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/Mistral-7B-Instruct-v0.2-GPTQ`. - To download from a specific branch, enter for example `TheBloke/Mistral-7B-Instruct-v0.2-GPTQ:gptq-4bit-32g-actorder_True` - see Provided Files above for the list of branches for each option. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `Mistral-7B-Instruct-v0.2-GPTQ` 7. The model will automatically load, and is now ready for use! 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. - Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`. 9. Once you're ready, click the **Text Generation** tab and enter a prompt to get started! <!-- README_GPTQ.md-text-generation-webui end --> <!-- README_GPTQ.md-use-from-tgi start --> ## Serving this model from Text Generation Inference (TGI) It's recommended to use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0` Example Docker parameters: ```shell --model-id TheBloke/Mistral-7B-Instruct-v0.2-GPTQ --port 3000 --quantize gptq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 ``` Example Python code for interfacing with TGI (requires huggingface-hub 0.17.0 or later): ```shell pip3 install huggingface-hub ``` ```python from huggingface_hub import InferenceClient endpoint_url = "https://your-endpoint-url-here" prompt = "Tell me about AI" prompt_template=f'''<s>[INST] {prompt} [/INST] ''' client = InferenceClient(endpoint_url) response = client.text_generation(prompt, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1) print(f"Model output: {response}") ``` <!-- README_GPTQ.md-use-from-tgi end --> <!-- README_GPTQ.md-use-from-python start --> ## Python code example: inference from this GPTQ model ### Install the necessary packages Requires: Transformers 4.33.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later. 
```shell
pip3 install --upgrade transformers optimum
# If using PyTorch 2.1 + CUDA 12.x:
pip3 install --upgrade auto-gptq
# or, if using PyTorch 2.1 + CUDA 11.x:
pip3 install --upgrade auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
```

If you are using PyTorch 2.0, you will need to install AutoGPTQ from source. Likewise if you have problems with the pre-built wheels, you should try building from source:

```shell
pip3 uninstall -y auto-gptq
git clone https://github.com/PanQiWei/AutoGPTQ
cd AutoGPTQ
git checkout v0.5.1
pip3 install .
```

### Example Python code

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name_or_path = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
# To use a different branch, change revision
# For example: revision="gptq-4bit-32g-actorder_True"
model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
                                             device_map="auto",
                                             trust_remote_code=False,
                                             revision="main")

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

prompt = "Tell me about AI"
prompt_template=f'''<s>[INST] {prompt} [/INST]
'''

print("\n\n*** Generate:")

input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
print(tokenizer.decode(output[0]))

# Inference can also be done using transformers' pipeline

print("*** Pipeline:")
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    repetition_penalty=1.1
)

print(pipe(prompt_template)[0]['generated_text'])
```
<!-- README_GPTQ.md-use-from-python end -->

<!-- README_GPTQ.md-compatibility start -->
## Compatibility

The files provided are tested to work with Transformers. For non-Mistral models, AutoGPTQ can also be used directly.

[ExLlama](https://github.com/turboderp/exllama) is compatible with Llama and Mistral models in 4-bit. Please see the Provided Files table above for per-file compatibility.

For a list of clients/servers, please see "Known compatible clients / servers", above.
<!-- README_GPTQ.md-compatibility end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.
**Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros

Thank you to all my generous patrons and donaters!

And thank you again to a16z for their generous grant.

<!-- footer end -->

# Original model card: Mistral AI_'s Mistral 7B Instruct v0.2

# Model Card for Mistral-7B-Instruct-v0.2

The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an improved instruct fine-tuned version of [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1).

For full details of this model please read our [paper](https://arxiv.org/abs/2310.06825) and [release blog post](https://mistral.ai/news/la-plateforme/).

## Instruction format

In order to leverage instruction fine-tuning, your prompt should be surrounded by `[INST]` and `[/INST]` tokens. The very first instruction should begin with a begin-of-sentence id. The next instructions should not. The assistant generation will be ended by the end-of-sentence token id.

E.g.
```
text = "<s>[INST] What is your favourite condiment? [/INST]"
"Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!</s> "
"[INST] Do you have mayonnaise recipes? [/INST]"
```

This format is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating) via the `apply_chat_template()` method:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")

messages = [
    {"role": "user", "content": "What is your favourite condiment?"},
    {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
    {"role": "user", "content": "Do you have mayonnaise recipes?"}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
```

## Model Architecture
This instruction model is based on Mistral-7B-v0.1, a transformer model with the following architecture choices:
- Grouped-Query Attention
- Sliding-Window Attention
- Byte-fallback BPE tokenizer

## Troubleshooting
- If you see the following error:
```
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/transformers/models/auto/auto_factory.py", line 482, in from_pretrained
    config, kwargs = AutoConfig.from_pretrained(
  File "/transformers/models/auto/configuration_auto.py", line 1022, in from_pretrained
    config_class = CONFIG_MAPPING[config_dict["model_type"]]
  File "/transformers/models/auto/configuration_auto.py", line 723, in __getitem__
    raise KeyError(key)
KeyError: 'mistral'
```

Installing transformers from source should solve the issue:
```
pip install git+https://github.com/huggingface/transformers
```
This should not be required after transformers-v4.33.4.

## Limitations

The Mistral 7B Instruct model is a quick demonstration that the base model can be easily fine-tuned to achieve compelling performance. It does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.

## The Mistral AI Team

Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Lélio Renard Lavaud, Louis Ternon, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Théophile Gervet, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
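Relating the "Explanation of GPTQ parameters" earlier in this README to code: below is a hedged sketch of how a quant with the table's parameters could be produced via `transformers`' `GPTQConfig`. The calibration dataset shown is a stand-in (this repo used VMware Open Instruct), and quantising a 7B model this way needs a CUDA GPU plus the AutoGPTQ backend.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

# Hedged sketch only: maps the Provided Files table columns onto
# GPTQConfig fields. Requires auto-gptq and a CUDA GPU.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
quant_config = GPTQConfig(
    bits=4,             # "Bits" column
    group_size=128,     # "GS" column
    desc_act=True,      # "Act Order" column
    damp_percent=0.1,   # "Damp %" column
    dataset="c4",       # stand-in calibration set; the repo used VMware Open Instruct
    tokenizer=tokenizer,
)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    device_map="auto",
    quantization_config=quant_config,  # triggers quantisation on load
)
model.save_pretrained("Mistral-7B-Instruct-v0.2-GPTQ-4bit-128g")
```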
timm/eva02_large_patch14_448.mim_in22k_ft_in22k_in1k
timm
"2024-02-10T23:37:42Z"
110,909
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-22k", "arxiv:2303.11331", "arxiv:2303.15389", "license:mit", "region:us" ]
image-classification
"2023-03-31T04:37:29Z"
---
license: mit
library_name: timm
tags:
- image-classification
- timm
datasets:
- imagenet-1k
- imagenet-22k
---
# Model card for eva02_large_patch14_448.mim_in22k_ft_in22k_in1k

An EVA02 image classification model. Pretrained on ImageNet-22k with masked image modeling (using EVA-CLIP as a MIM teacher) and fine-tuned on ImageNet-22k then on ImageNet-1k by paper authors.

EVA-02 models are vision transformers with mean pooling, SwiGLU, Rotary Position Embeddings (ROPE), and extra LN in MLP (for Base & Large).

NOTE: `timm` checkpoints are float32 for consistency with other models. Original checkpoints are float16 or bfloat16 in some cases, see originals if that's preferred.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 305.1
  - GMACs: 362.3
  - Activations (M): 689.9
  - Image size: 448 x 448
- **Papers:**
  - EVA-02: A Visual Representation for Neon Genesis: https://arxiv.org/abs/2303.11331
  - EVA-CLIP: Improved Training Techniques for CLIP at Scale: https://arxiv.org/abs/2303.15389
- **Original:**
  - https://github.com/baaivision/EVA
  - https://huggingface.co/Yuxin-CV/EVA-02
- **Pretrain Dataset:** ImageNet-22k
- **Dataset:** ImageNet-1k

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch  # needed for torch.topk below
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('eva02_large_patch14_448.mim_in22k_ft_in22k_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 1025, 1024) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).
|model                                           |top1  |top5  |param_count|img_size|
|-----------------------------------------------|------|------|-----------|--------|
|eva02_large_patch14_448.mim_m38m_ft_in22k_in1k  |90.054|99.042|305.08     |448     |
|eva02_large_patch14_448.mim_in22k_ft_in22k_in1k |89.946|99.01 |305.08     |448     |
|eva_giant_patch14_560.m30m_ft_in22k_in1k        |89.792|98.992|1014.45    |560     |
|eva02_large_patch14_448.mim_in22k_ft_in1k       |89.626|98.954|305.08     |448     |
|eva02_large_patch14_448.mim_m38m_ft_in1k        |89.57 |98.918|305.08     |448     |
|eva_giant_patch14_336.m30m_ft_in22k_in1k        |89.56 |98.956|1013.01    |336     |
|eva_giant_patch14_336.clip_ft_in1k              |89.466|98.82 |1013.01    |336     |
|eva_large_patch14_336.in22k_ft_in22k_in1k       |89.214|98.854|304.53     |336     |
|eva_giant_patch14_224.clip_ft_in1k              |88.882|98.678|1012.56    |224     |
|eva02_base_patch14_448.mim_in22k_ft_in22k_in1k  |88.692|98.722|87.12      |448     |
|eva_large_patch14_336.in22k_ft_in1k             |88.652|98.722|304.53     |336     |
|eva_large_patch14_196.in22k_ft_in22k_in1k       |88.592|98.656|304.14     |196     |
|eva02_base_patch14_448.mim_in22k_ft_in1k        |88.23 |98.564|87.12      |448     |
|eva_large_patch14_196.in22k_ft_in1k             |87.934|98.504|304.14     |196     |
|eva02_small_patch14_336.mim_in22k_ft_in1k       |85.74 |97.614|22.13      |336     |
|eva02_tiny_patch14_336.mim_in22k_ft_in1k        |80.658|95.524|5.76       |336     |

## Citation
```bibtex
@article{EVA02,
  title={EVA-02: A Visual Representation for Neon Genesis},
  author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue},
  journal={arXiv preprint arXiv:2303.11331},
  year={2023}
}
```
```bibtex
@article{EVA-CLIP,
  title={EVA-CLIP: Improved Training Techniques for CLIP at Scale},
  author={Sun, Quan and Fang, Yuxin and Wu, Ledell and Wang, Xinlong and Cao, Yue},
  journal={arXiv preprint arXiv:2303.15389},
  year={2023}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
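To reproduce a comparison like the table above locally, here is a hedged sketch using `timm`'s model registry; the names resolve only if your installed `timm` version ships these checkpoints.

```python
import timm

# Hedged sketch: enumerate the EVA02 checkpoints available in the
# installed timm version and report their parameter counts.
for name in timm.list_models("eva02*", pretrained=True):
    model = timm.create_model(name, pretrained=False)  # skip weight download
    params_m = sum(p.numel() for p in model.parameters()) / 1e6
    print(f"{name}: {params_m:.1f}M params")
```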
Qwen/Qwen-14B-Chat
Qwen
"2023-12-13T15:44:33Z"
110,602
351
transformers
[ "transformers", "safetensors", "qwen", "text-generation", "custom_code", "zh", "en", "arxiv:2309.16609", "arxiv:2305.08322", "arxiv:2009.03300", "arxiv:2305.05280", "arxiv:2210.03629", "autotrain_compatible", "has_space", "region:us" ]
text-generation
"2023-09-24T03:27:58Z"
--- language: - zh - en tags: - qwen pipeline_tag: text-generation inference: false --- # Qwen-14B-Chat <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg" width="400"/> <p> <br> <p align="center"> 🤗 <a href="https://huggingface.co/Qwen">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href="https://modelscope.cn/organization/qwen">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://arxiv.org/abs/2309.16609">Paper</a> &nbsp&nbsp | &nbsp&nbsp🖥️ <a href="https://modelscope.cn/studios/qwen/Qwen-14B-Chat-Demo/summary">Demo</a> <br> <a href="https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://discord.gg/z3GAxXZ9Ce">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://dashscope.aliyun.com">API</a> </p> <br> ## 介绍(Introduction) **通义千问-14B(Qwen-14B)**是阿里云研发的通义千问大模型系列的140亿参数规模的模型。Qwen-14B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-14B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-14B-Chat。本仓库为Qwen-14B-Chat的仓库。 如果您想了解更多关于通义千问-14B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。 **Qwen-14B** is the 14B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-14B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-14B, we release Qwen-14B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-14B-Chat. For more details about the open-source model of Qwen-14B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository. <br> ## 要求(Requirements) * python 3.8及以上版本 * pytorch 1.12及以上版本,推荐2.0及以上版本 * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项) * python 3.8 and above * pytorch 1.12 and above, 2.0 and above are recommended * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.) <br> ## 依赖项(Dependency) 运行Qwen-14B-Chat,请确保满足上述要求,再执行以下pip命令安装依赖库 To run Qwen-14B-Chat, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries. ```bash pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed ``` 另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。 In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage. ```bash git clone https://github.com/Dao-AILab/flash-attention cd flash-attention && pip install . # 下方安装可选,安装可能比较缓慢。 # pip install csrc/layer_norm # pip install csrc/rotary ``` <br> ## 快速使用(Quickstart) 下面我们展示了一个使用Qwen-14B-Chat模型,进行多轮对话交互的样例: We show an example of multi-turn interaction with Qwen-14B-Chat in the following code: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig # Note: The default behavior now has injection attack prevention off. 
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-14B-Chat", trust_remote_code=True)

# use bf16
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-14B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
# use fp16
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-14B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
# use cpu only
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-14B-Chat", device_map="cpu", trust_remote_code=True).eval()
# use auto mode, automatically select precision based on the device.
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-14B-Chat", device_map="auto", trust_remote_code=True).eval()

# Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
# model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-14B-Chat", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 (you can specify generation length, top_p and other hyperparameters)

# 第一轮对话 1st dialogue turn
response, history = model.chat(tokenizer, "你好", history=None)
print(response)
# 你好!很高兴为你提供帮助。

# 第二轮对话 2nd dialogue turn
response, history = model.chat(tokenizer, "给我讲一个年轻人奋斗创业最终取得成功的故事。", history=history)
print(response)
# 这是一个关于一个年轻人奋斗创业最终取得成功的故事。
# 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。
# 为了实现这个目标,李明勤奋学习,考上了大学。在大学期间,他积极参加各种创业比赛,获得了不少奖项。他还利用课余时间去实习,积累了宝贵的经验。
# 毕业后,李明决定开始自己的创业之路。他开始寻找投资机会,但多次都被拒绝了。然而,他并没有放弃。他继续努力,不断改进自己的创业计划,并寻找新的投资机会。
# 最终,李明成功地获得了一笔投资,开始了自己的创业之路。他成立了一家科技公司,专注于开发新型软件。在他的领导下,公司迅速发展起来,成为了一家成功的科技企业。
# 李明的成功并不是偶然的。他勤奋、坚韧、勇于冒险,不断学习和改进自己。他的成功也证明了,只要努力奋斗,任何人都有可能取得成功。

# 第三轮对话 3rd dialogue turn
response, history = model.chat(tokenizer, "给这个故事起一个标题", history=history)
print(response)
# 《奋斗创业:一个年轻人的成功之路》
```

关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。

For more usage instructions, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen).
<br>

## 量化 (Quantization)

### 用法 (Usage)

**请注意:我们更新量化方案为基于[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)的量化,提供Qwen-14B-Chat的Int4量化模型[点击这里](https://huggingface.co/Qwen/Qwen-14B-Chat-Int4)。相比此前方案,该方案在模型评测效果几乎无损,且存储需求更低,推理速度更优。**

**Note: we provide a new solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), and release an Int4 quantized model for Qwen-14B-Chat [Click here](https://huggingface.co/Qwen/Qwen-14B-Chat-Int4), which achieves nearly lossless model quality, with lower memory cost and faster inference speed, in comparison with the previous solution.**

以下我们提供示例说明如何使用Int4量化模型。在开始使用前,请先保证满足要求(如torch 2.0及以上,transformers版本为4.32.0及以上,等等),并安装所需安装包:

Here we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:

```bash
pip install auto-gptq optimum
```

如安装`auto-gptq`遇到问题,我们建议您到官方[repo](https://github.com/PanQiWei/AutoGPTQ)搜索合适的预编译wheel。

随后即可使用和上述一致的用法调用量化模型:

If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a pre-built wheel.
Then you can load the quantized model easily and run inference in the same way as usual:

```python
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-14B-Chat-Int4",
    device_map="auto",
    trust_remote_code=True
).eval()
response, history = model.chat(tokenizer, "你好", history=None)
```

### 效果评测

我们对BF16,Int8和Int4模型在基准评测上做了测试(使用zero-shot设置),发现量化模型效果损失较小,结果如下所示:

We illustrate the zero-shot performance of both BF16, Int8 and Int4 models on the benchmark, and we find that the quantized model does not suffer from significant performance degradation. Results are shown below:

| Quantization | MMLU | CEval (val) | GSM8K | Humaneval |
|--------------|:----:|:-----------:|:-----:|:---------:|
| BF16 | 64.6 | 69.8 | 60.1 | 43.9 |
| Int8 | 63.6 | 68.6 | 60.0 | 48.2 |
| Int4 | 63.3 | 69.0 | 59.8 | 45.7 |

### 推理速度 (Inference Speed)

我们测算了不同精度模型以及不同FlashAttn库版本下模型生成2048和8192个token的平均推理速度。如图所示:

We measured the average inference speed of generating 2048 and 8192 tokens with different quantization levels and versions of flash-attention, respectively.

| Quantization | FlashAttn | Speed (2048 tokens) | Speed (8192 tokens) |
| ------------- | :-------: | :------------------:| :------------------:|
| BF16 | v2 | 32.88 | 24.87 |
| Int8 | v2 | 29.28 | 24.22 |
| Int4 | v2 | 38.72 | 27.33 |
| BF16 | v1 | 32.76 | 28.89 |
| Int8 | v1 | 28.31 | 23.87 |
| Int4 | v1 | 37.81 | 26.46 |
| BF16 | Disabled | 29.32 | 22.91 |
| Int8 | Disabled | 31.12 | 24.60 |
| Int4 | Disabled | 37.65 | 26.00 |

具体而言,我们记录在长度为1的上下文的条件下生成8192个token的性能。评测运行于单张A100-SXM4-80G GPU,使用PyTorch 2.0.1和CUDA 11.8。推理速度是生成8192个token的速度均值。

In detail, the setting of profiling is generating 8192 new tokens with 1 context token. The profiling runs on a single A100-SXM4-80G GPU with PyTorch 2.0.1 and CUDA 11.8. The inference speed is averaged over the generated 8192 tokens.

注意:以上Int4/Int8模型生成速度使用autogptq库给出,当前`AutoModelForCausalLM.from_pretrained`载入的模型生成速度会慢大约20%。我们已经将该问题汇报给HuggingFace团队,若有解决方案将即时更新。

Note: The generation speed of the Int4/Int8 models mentioned above is provided by the autogptq library. The current speed of the model loaded using `AutoModelForCausalLM.from_pretrained` will be approximately 20% slower. We have reported this issue to the HuggingFace team and will update it promptly if a solution is available.

### 显存使用 (GPU Memory Usage)

我们还测算了不同模型精度编码2048个token及生成8192个token的峰值显存占用情况。(显存消耗在是否使用FlashAttn的情况下均类似。)结果如下所示:

We also profile the peak GPU memory usage for encoding 2048 tokens as context (and generating a single token) and generating 8192 tokens (with a single token as context) under different quantization levels, respectively. (The GPU memory usage is similar whether flash-attention is used or not.) The results are shown below.

| Quantization Level | Peak Usage for Encoding 2048 Tokens | Peak Usage for Generating 8192 Tokens |
| ------------------ | :---------------------------------: | :-----------------------------------: |
| BF16 | 30.15GB | 38.94GB |
| Int8 | 18.81GB | 27.54GB |
| Int4 | 13.01GB | 21.79GB |

上述性能测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py)完成。

The above speed and memory profiling are conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py).
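The linked profiling script is authoritative; for readers who just want the shape of the measurement, here is a hedged sketch of timing generation and reading peak GPU memory (the numbers it prints will not match the tables unless the hardware/software setup matches):

```python
import time
import torch

# Hedged sketch of the profiling described above: generate N tokens from
# a 1-token-ish context and report tokens/sec plus peak GPU memory.
# `model` and `tokenizer` are the objects loaded earlier in this card.
def profile_generation(model, tokenizer, max_new_tokens=512):
    torch.cuda.reset_peak_memory_stats()
    input_ids = tokenizer("你好", return_tensors="pt").input_ids.to(model.device)
    torch.cuda.synchronize()
    start = time.time()
    output = model.generate(input_ids, max_new_tokens=max_new_tokens)
    torch.cuda.synchronize()
    elapsed = time.time() - start
    new_tokens = output.shape[1] - input_ids.shape[1]
    peak_gb = torch.cuda.max_memory_allocated() / 1024**3
    print(f"{new_tokens / elapsed:.2f} tokens/s, peak memory {peak_gb:.2f} GB")
```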
<br> ## 模型细节(Model) 与Qwen-14B预训练模型相同,Qwen-14B-Chat模型规模基本情况如下所示 The details of the model architecture of Qwen-14B-Chat are listed as follows | Hyperparameter | Value | |:----------------|:------:| | n_layers | 40 | | n_heads | 40 | | d_model | 5120 | | vocab size | 151851 | | sequence length | 2048 | 在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法, 即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。 在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-14B-Chat使用了约15万token大小的词表。 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。 词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。 For position encoding, FFN activation function, and normalization calculation methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration). For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-14B-Chat uses a vocabulary of over 150K tokens. It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary. It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization. <br> ## 评测效果(Evaluation) 对于Qwen-14B-Chat模型,我们同样评测了常规的中文理解(C-Eval)、英文理解(MMLU)、代码(HumanEval)和数学(GSM8K)等权威任务,同时包含了长序列任务的评测结果。由于Qwen-14B-Chat模型经过对齐后,激发了较强的外部系统调用能力,我们还进行了工具使用能力方面的评测。 提示:由于硬件和框架造成的舍入误差,复现结果如有波动属于正常现象。 For Qwen-14B-Chat, we also evaluate the model on C-Eval, MMLU, HumanEval, GSM8K, etc., as well as the benchmark evaluation for long-context understanding, and tool usage. Note: Due to rounding errors caused by hardware and framework, differences in reproduced results are possible. ### 中文评测(Chinese Evaluation) #### C-Eval 在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-14B-Chat模型的0-shot & 5-shot准确率 We demonstrate the 0-shot & 5-shot accuracy of Qwen-14B-Chat on C-Eval validation set | Model | Avg. Acc. | |:--------------------------------:|:---------:| | LLaMA2-7B-Chat | 31.9 | | LLaMA2-13B-Chat | 36.2 | | LLaMA2-70B-Chat | 44.3 | | ChatGLM2-6B-Chat | 52.6 | | InternLM-7B-Chat | 53.6 | | Baichuan2-7B-Chat | 55.6 | | Baichuan2-13B-Chat | 56.7 | | Qwen-7B-Chat (original) (0-shot) | 54.2 | | **Qwen-7B-Chat (0-shot)** | 59.7 | | **Qwen-7B-Chat (5-shot)** | 59.3 | | **Qwen-14B-Chat (0-shot)** | 69.8 | | **Qwen-14B-Chat (5-shot)** | **71.7** | C-Eval测试集上,Qwen-14B-Chat模型的zero-shot准确率结果如下: The zero-shot accuracy of Qwen-14B-Chat on C-Eval testing set is provided below: | Model | Avg. | STEM | Social Sciences | Humanities | Others | | :---------------------- | :------: | :--: | :-------------: | :--------: | :----: | | Chinese-Alpaca-Plus-13B | 41.5 | 36.6 | 49.7 | 43.1 | 41.2 | | Chinese-Alpaca-2-7B | 40.3 | - | - | - | - | | ChatGLM2-6B-Chat | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 | | Baichuan-13B-Chat | 51.5 | 43.7 | 64.6 | 56.2 | 49.2 | | Qwen-7B-Chat (original) | 54.6 | 47.8 | 67.6 | 59.3 | 50.6 | | **Qwen-7B-Chat** | 58.6 | 53.3 | 72.1 | 62.8 | 52.0 | | **Qwen-14B-Chat** | **69.1** | 65.1 | 80.9 | 71.2 | 63.4 | 在14B规模模型上,经过人类指令对齐的Qwen-14B-Chat模型,准确率在同类相近规模模型中仍然处于前列。 Compared with other pretrained models with comparable model size, the human-aligned Qwen-14B-Chat performs well in C-Eval accuracy. 
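(Before moving on to the English benchmarks, a quick hedged sketch of the tokenizer behaviour described in the Model section above — numbers are split into single digits; the exact token representation depends on the remote tokenizer code:)

```python
from transformers import AutoTokenizer

# Hedged sketch: make the per-digit number splitting visible.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-14B-Chat", trust_remote_code=True)
print(tokenizer.tokenize("12345"))   # expected: one token per digit
print(len(tokenizer))                # vocabulary size reported above
```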
### 英文评测(English Evaluation)

#### MMLU

[MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-14B-Chat模型的 0-shot & 5-shot 准确率如下,效果同样在同类对齐模型中同样表现较优。

The 0-shot & 5-shot accuracy of Qwen-14B-Chat on MMLU is provided below. The performance of Qwen-14B-Chat remains near the top among other human-aligned models of comparable size.

| Model | Avg. Acc. |
|:--------------------------------:|:---------:|
| ChatGLM2-6B-Chat | 46.0 |
| LLaMA2-7B-Chat | 46.2 |
| InternLM-7B-Chat | 51.1 |
| Baichuan2-7B-Chat | 52.9 |
| LLaMA2-13B-Chat | 54.6 |
| Baichuan2-13B-Chat | 57.3 |
| LLaMA2-70B-Chat | 63.8 |
| Qwen-7B-Chat (original) (0-shot) | 53.9 |
| **Qwen-7B-Chat (0-shot)** | 55.8 |
| **Qwen-7B-Chat (5-shot)** | 57.0 |
| **Qwen-14B-Chat (0-shot)** | 64.6 |
| **Qwen-14B-Chat (5-shot)** | **66.5** |

### 代码评测(Coding Evaluation)

Qwen-14B-Chat在[HumanEval](https://github.com/openai/human-eval)的zero-shot Pass@1效果如下

The zero-shot Pass@1 of Qwen-14B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below

| Model | Pass@1 |
|:-----------------------:|:--------:|
| ChatGLM2-6B-Chat | 11.0 |
| LLaMA2-7B-Chat | 12.2 |
| InternLM-7B-Chat | 14.6 |
| Baichuan2-7B-Chat | 13.4 |
| LLaMA2-13B-Chat | 18.9 |
| Baichuan2-13B-Chat | 17.7 |
| LLaMA2-70B-Chat | 32.3 |
| Qwen-7B-Chat (original) | 24.4 |
| **Qwen-7B-Chat** | 37.2 |
| **Qwen-14B-Chat** | **43.9** |

### 数学评测(Mathematics Evaluation)

在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-14B-Chat的准确率结果如下

The accuracy of Qwen-14B-Chat on GSM8K is shown below

| Model | Acc. |
|:--------------------------------:|:--------:|
| LLaMA2-7B-Chat | 26.3 |
| ChatGLM2-6B-Chat | 28.8 |
| Baichuan2-7B-Chat | 32.8 |
| InternLM-7B-Chat | 33.0 |
| LLaMA2-13B-Chat | 37.1 |
| Baichuan2-13B-Chat | 55.3 |
| LLaMA2-70B-Chat | 59.3 |
| Qwen-7B-Chat (original) (0-shot) | 41.1 |
| **Qwen-7B-Chat (0-shot)** | 50.3 |
| **Qwen-7B-Chat (8-shot)** | 54.1 |
| **Qwen-14B-Chat (0-shot)** | **60.1** |
| **Qwen-14B-Chat (8-shot)** | 59.3 |

### 长序列评测(Long-Context Understanding)

通过NTK插值,LogN注意力缩放可以扩展Qwen-14B-Chat的上下文长度。在长文本摘要数据集[VCSUM](https://arxiv.org/abs/2305.05280)上(文本平均长度在15K左右),Qwen-14B-Chat的Rouge-L结果如下:

**(若要启用这些技巧,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**

We introduce NTK-aware interpolation and LogN attention scaling to extend the context length of Qwen-14B-Chat. The Rouge-L results of Qwen-14B-Chat on the long-text summarization dataset [VCSUM](https://arxiv.org/abs/2305.05280) (the average length of this dataset is around 15K) are shown below:

**(To use these tricks, please set `use_dynamic_ntk` and `use_logn_attn` to true in config.json.)**

| Model | VCSUM (zh) |
|:------------------|:----------:|
| GPT-3.5-Turbo-16k | 16.0 |
| LLama2-7B-Chat | 0.2 |
| InternLM-7B-Chat | 13.0 |
| ChatGLM2-6B-Chat | 16.3 |
| **Qwen-14B-Chat** | **17.3** |

### 工具使用能力的评测(Tool Usage)

#### ReAct Prompting

千问支持通过 [ReAct Prompting](https://arxiv.org/abs/2210.03629) 调用插件/工具/API。ReAct 也是 [LangChain](https://python.langchain.com/) 框架采用的主要方式之一。在我们开源的、用于评估工具使用能力的评测基准上,千问的表现如下:

Qwen-Chat supports calling plugins/tools/APIs through [ReAct Prompting](https://arxiv.org/abs/2210.03629). ReAct is also one of the main approaches used by the [LangChain](https://python.langchain.com/) framework.
In our evaluation benchmark for assessing tool usage capabilities, Qwen-Chat's performance is as follows: <table> <tr> <th colspan="4" align="center">Chinese Tool-Use Benchmark</th> </tr> <tr> <th align="center">Model</th><th align="center">Tool Selection (Acc.↑)</th><th align="center">Tool Input (Rouge-L↑)</th><th align="center">False Positive Error↓</th> </tr> <tr> <td>GPT-4</td><td align="center">95%</td><td align="center">0.90</td><td align="center">15.0%</td> </tr> <tr> <td>GPT-3.5</td><td align="center">85%</td><td align="center">0.88</td><td align="center">75.0%</td> </tr> <tr> <td>Qwen-7B-Chat</td><td align="center">98%</td><td align="center">0.91</td><td align="center">7.3%</td> </tr> <tr> <td>Qwen-14B-Chat</td><td align="center">98%</td><td align="center">0.93</td><td align="center">2.4%</td> </tr> </table> > 评测基准中出现的插件均没有出现在千问的训练集中。该基准评估了模型在多个候选插件中选择正确插件的准确率、传入插件的参数的合理性、以及假阳率。假阳率(False Positive)定义:在处理不该调用插件的请求时,错误地调用了插件。 > The plugins that appear in the evaluation set do not appear in the training set of Qwen. This benchmark evaluates the accuracy of the model in selecting the correct plugin from multiple candidate plugins, the rationality of the parameters passed into the plugin, and the false positive rate. False Positive: Incorrectly invoking a plugin when it should not have been called when responding to a query. ![](assets/react_showcase_001.png) ![](assets/react_showcase_002.png) #### Code Interpreter 为了考察Qwen使用Python Code Interpreter完成数学解题、数据可视化、及文件处理与爬虫等任务的能力,我们专门建设并开源了一个评测这方面能力的[评测基准](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark)。 我们发现Qwen在生成代码的可执行率、结果正确性上均表现较好: To assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark). 
We have observed that Qwen performs well in terms of code executability and result accuracy when generating code: <table> <tr> <th colspan="4" align="center">Executable Rate of Generated Code (%)</th> </tr> <tr> <th align="center">Model</th><th align="center">Math↑</th><th align="center">Visualization↑</th><th align="center">General↑</th> </tr> <tr> <td>GPT-4</td><td align="center">91.9</td><td align="center">85.9</td><td align="center">82.8</td> </tr> <tr> <td>GPT-3.5</td><td align="center">89.2</td><td align="center">65.0</td><td align="center">74.1</td> </tr> <tr> <td>LLaMA2-7B-Chat</td> <td align="center">41.9</td> <td align="center">33.1</td> <td align="center">24.1 </td> </tr> <tr> <td>LLaMA2-13B-Chat</td> <td align="center">50.0</td> <td align="center">40.5</td> <td align="center">48.3 </td> </tr> <tr> <td>CodeLLaMA-7B-Instruct</td> <td align="center">85.1</td> <td align="center">54.0</td> <td align="center">70.7 </td> </tr> <tr> <td>CodeLLaMA-13B-Instruct</td> <td align="center">93.2</td> <td align="center">55.8</td> <td align="center">74.1 </td> </tr> <tr> <td>InternLM-7B-Chat-v1.1</td> <td align="center">78.4</td> <td align="center">44.2</td> <td align="center">62.1 </td> </tr> <tr> <td>InternLM-20B-Chat</td> <td align="center">70.3</td> <td align="center">44.2</td> <td align="center">65.5 </td> </tr> <tr> <td>Qwen-7B-Chat</td> <td align="center">82.4</td> <td align="center">64.4</td> <td align="center">67.2 </td> </tr> <tr> <td>Qwen-14B-Chat</td> <td align="center">89.2</td> <td align="center">84.1</td> <td align="center">65.5</td> </tr> </table> <table> <tr> <th colspan="4" align="center">Accuracy of Code Execution Results (%)</th> </tr> <tr> <th align="center">Model</th><th align="center">Math↑</th><th align="center">Visualization-Hard↑</th><th align="center">Visualization-Easy↑</th> </tr> <tr> <td>GPT-4</td><td align="center">82.8</td><td align="center">66.7</td><td align="center">60.8</td> </tr> <tr> <td>GPT-3.5</td><td align="center">47.3</td><td align="center">33.3</td><td align="center">55.7</td> </tr> <tr> <td>LLaMA2-7B-Chat</td> <td align="center">3.9</td> <td align="center">14.3</td> <td align="center">39.2 </td> </tr> <tr> <td>LLaMA2-13B-Chat</td> <td align="center">8.3</td> <td align="center">8.3</td> <td align="center">40.5 </td> </tr> <tr> <td>CodeLLaMA-7B-Instruct</td> <td align="center">14.3</td> <td align="center">26.2</td> <td align="center">60.8 </td> </tr> <tr> <td>CodeLLaMA-13B-Instruct</td> <td align="center">28.2</td> <td align="center">27.4</td> <td align="center">62.0 </td> </tr> <tr> <td>InternLM-7B-Chat-v1.1</td> <td align="center">28.5</td> <td align="center">4.8</td> <td align="center">40.5 </td> </tr> <tr> <td>InternLM-20B-Chat</td> <td align="center">34.6</td> <td align="center">21.4</td> <td align="center">45.6 </td> </tr> <tr> <td>Qwen-7B-Chat</td> <td align="center">41.9</td> <td align="center">40.5</td> <td align="center">54.4 </td> </tr> <tr> <td>Qwen-14B-Chat</td> <td align="center">58.4</td> <td align="center">53.6</td> <td align="center">59.5</td> </tr> </table> <p align="center"> <br> <img src="assets/code_interpreter_showcase_001.jpg" /> <br> <p> #### Huggingface Agent 千问还具备作为 [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents) 的能力。它在 Huggingface 提供的run模式评测基准上的表现如下: Qwen-Chat also has the capability to be used as a [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents). 
Its performance on the run-mode benchmark provided by HuggingFace is as follows:

<table>
    <tr>
        <th colspan="4" align="center">HuggingFace Agent Benchmark - Run Mode</th>
    </tr>
    <tr>
        <th align="center">Model</th><th align="center">Tool Selection↑</th><th align="center">Tool Used↑</th><th align="center">Code↑</th>
    </tr>
    <tr>
        <td>GPT-4</td><td align="center">100</td><td align="center">100</td><td align="center">97.4</td>
    </tr>
    <tr>
        <td>GPT-3.5</td><td align="center">95.4</td><td align="center">96.3</td><td align="center">87.0</td>
    </tr>
    <tr>
        <td>StarCoder-Base-15B</td><td align="center">86.1</td><td align="center">87.0</td><td align="center">68.9</td>
    </tr>
    <tr>
        <td>StarCoder-15B</td><td align="center">87.0</td><td align="center">88.0</td><td align="center">68.9</td>
    </tr>
    <tr>
        <td>Qwen-7B-Chat</td><td align="center">87.0</td><td align="center">87.0</td><td align="center">71.5</td>
    </tr>
    <tr>
        <td>Qwen-14B-Chat</td><td align="center">93.5</td><td align="center">94.4</td><td align="center">87.0</td>
    </tr>
</table>

<table>
    <tr>
        <th colspan="4" align="center">HuggingFace Agent Benchmark - Chat Mode</th>
    </tr>
    <tr>
        <th align="center">Model</th><th align="center">Tool Selection↑</th><th align="center">Tool Used↑</th><th align="center">Code↑</th>
    </tr>
    <tr>
        <td>GPT-4</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">98.5</td>
    </tr>
    <tr>
        <td>GPT-3.5</td><td align="center">97.3</td><td align="center">96.8</td><td align="center">89.6</td>
    </tr>
    <tr>
        <td>StarCoder-Base-15B</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">91.1</td>
    </tr>
    <tr>
        <td>StarCoder-15B</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">89.6</td>
    </tr>
    <tr>
        <td>Qwen-7B-Chat</td><td align="center">94.7</td><td align="center">94.7</td><td align="center">85.1</td>
    </tr>
    <tr>
        <td>Qwen-14B-Chat</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">95.5</td>
    </tr>
</table>

<br>

## FAQ

如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。

If you meet problems, please refer to the [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search for a solution before you launch a new issue.
<br>

## 引用 (Citation)

如果你觉得我们的工作对你有帮助,欢迎引用!

If you find our work helpful, feel free to give us a cite.

```
@article{qwen,
  title={Qwen Technical Report},
  author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
  journal={arXiv preprint arXiv:2309.16609},
  year={2023}
}
```
<br>

## 使用协议(License Agreement)

我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,欢迎填写[问卷](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat)申请。

Our code and checkpoints are open for research purposes, and they are allowed for commercial use.
Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat) to apply.
<br>

## 联系我们(Contact Us)

如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。

If you are interested in leaving a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.
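As a practical footnote to the Long-Context Understanding section above, here is a hedged sketch of flipping the two config flags on a locally downloaded checkpoint (the local path is an assumption):

```python
import json

# Hedged sketch: enable the NTK interpolation and LogN attention scaling
# flags mentioned in the long-context section. The path is an assumption
# about where the checkpoint was downloaded.
config_path = "Qwen-14B-Chat/config.json"
with open(config_path, encoding="utf-8") as f:
    config = json.load(f)
config["use_dynamic_ntk"] = True
config["use_logn_attn"] = True
with open(config_path, "w", encoding="utf-8") as f:
    json.dump(config, f, ensure_ascii=False, indent=2)
```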
microsoft/beit-base-patch16-224
microsoft
"2023-02-27T17:56:38Z"
109,815
8
transformers
[ "transformers", "pytorch", "jax", "beit", "image-classification", "vision", "dataset:imagenet", "dataset:imagenet-21k", "arxiv:2106.08254", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
image-classification
"2022-03-02T23:29:05Z"
--- license: apache-2.0 tags: - image-classification - vision datasets: - imagenet - imagenet-21k --- # BEiT (base-sized model, fine-tuned on ImageNet-1k) BEiT model pre-trained in a self-supervised fashion on ImageNet-21k (14 million images, 21,841 classes) at resolution 224x224, and fine-tuned on ImageNet 2012 (1 million images, 1,000 classes) at resolution 224x224. It was introduced in the paper [BEIT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong and Furu Wei and first released in [this repository](https://github.com/microsoft/unilm/tree/master/beit). Disclaimer: The team releasing BEiT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The BEiT model is a Vision Transformer (ViT), which is a transformer encoder model (BERT-like). In contrast to the original ViT model, BEiT is pretrained on a large collection of images in a self-supervised fashion, namely ImageNet-21k, at a resolution of 224x224 pixels. The pre-training objective for the model is to predict visual tokens from the encoder of OpenAI's DALL-E's VQ-VAE, based on masked patches. Next, the model was fine-tuned in a supervised fashion on ImageNet (also referred to as ILSVRC2012), a dataset comprising 1 million images and 1,000 classes, also at resolution 224x224. Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. Contrary to the original ViT models, BEiT models do use relative position embeddings (similar to T5) instead of absolute position embeddings, and perform classification of images by mean-pooling the final hidden states of the patches, instead of placing a linear layer on top of the final hidden state of the [CLS] token. By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of an entire image. Alternatively, one can mean-pool the final hidden states of the patch embeddings, and place a linear layer on top of that. ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=microsoft/beit) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 1,000 ImageNet classes: ```python from transformers import BeitImageProcessor, BeitForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224') inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` Currently, both the feature extractor and model support PyTorch. 
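Complementing the classifier example above, here is a hedged sketch of the mean-pooled feature extraction described in the model description, using `BeitModel` rather than the classification head:

```python
from transformers import BeitImageProcessor, BeitModel
from PIL import Image
import requests
import torch

# Hedged sketch: extract mean-pooled patch features, as described above.
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')
model = BeitModel.from_pretrained('microsoft/beit-base-patch16-224')

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (1, 1 + num_patches, hidden_size)

features = hidden_states[:, 1:, :].mean(dim=1)  # mean-pool the patch tokens
print(features.shape)  # (1, 768) for the base model
```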
## Training data

The BEiT model was pretrained on [ImageNet-21k](http://www.image-net.org/), a dataset consisting of 14 million images and 21k classes, and fine-tuned on [ImageNet](http://www.image-net.org/challenges/LSVRC/2012/), a dataset consisting of 1 million images and 1k classes.

## Training procedure

### Preprocessing

The exact details of preprocessing of images during training/validation can be found [here](https://github.com/microsoft/unilm/blob/master/beit/datasets.py). Images are resized/rescaled to the same resolution (224x224) and normalized across the RGB channels with mean (0.5, 0.5, 0.5) and standard deviation (0.5, 0.5, 0.5); a minimal sketch of this step is given at the end of this card.

### Pretraining

For all pre-training related hyperparameters, we refer to page 15 of the [original paper](https://arxiv.org/abs/2106.08254).

## Evaluation results

For evaluation results on several image classification benchmarks, we refer to tables 1 and 2 of the original paper. Note that for fine-tuning, the best results are obtained with a higher resolution (384x384). Of course, increasing the model size will result in better performance.

### BibTeX entry and citation info

```bibtex
@article{DBLP:journals/corr/abs-2106-08254,
  author    = {Hangbo Bao and Li Dong and Furu Wei},
  title     = {BEiT: {BERT} Pre-Training of Image Transformers},
  journal   = {CoRR},
  volume    = {abs/2106.08254},
  year      = {2021},
  url       = {https://arxiv.org/abs/2106.08254},
  archivePrefix = {arXiv},
  eprint    = {2106.08254},
  timestamp = {Tue, 29 Jun 2021 16:55:04 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2106-08254.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

```bibtex
@inproceedings{deng2009imagenet,
  title={Imagenet: A large-scale hierarchical image database},
  author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
  booktitle={2009 IEEE conference on computer vision and pattern recognition},
  pages={248--255},
  year={2009},
  organization={Ieee}
}
```
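As a footnote to the preprocessing description above, the resize-and-normalize step roughly corresponds to the following torchvision sketch; the linked `datasets.py` remains the authoritative pipeline:

```python
from torchvision import transforms

# approximate eval-time preprocessing: 224x224 resize plus
# per-channel normalization with mean and std of 0.5
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])
```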
Salesforce/blip2-flan-t5-xl
Salesforce
"2023-12-13T11:43:54Z"
109,767
49
transformers
[ "transformers", "pytorch", "safetensors", "blip-2", "visual-question-answering", "vision", "image-to-text", "image-captioning", "en", "arxiv:2301.12597", "arxiv:2210.11416", "license:mit", "has_space", "region:us" ]
image-to-text
"2023-02-06T20:28:29Z"
---
language: en
license: mit
tags:
- vision
- image-to-text
- image-captioning
- visual-question-answering
pipeline_tag: image-to-text
inference: false
---

# BLIP-2, Flan T5-xl, pre-trained only

BLIP-2 model, leveraging [Flan T5-xl](https://huggingface.co/google/flan-t5-xl) (a large language model). It was introduced in the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Li et al. and first released in [this repository](https://github.com/salesforce/LAVIS/tree/main/projects/blip2).

Disclaimer: The team releasing BLIP-2 did not write a model card for this model so this model card has been written by the Hugging Face team.

## Model description

BLIP-2 consists of 3 models: a CLIP-like image encoder, a Querying Transformer (Q-Former) and a large language model.

The authors initialize the weights of the image encoder and large language model from pre-trained checkpoints and keep them frozen while training the Querying Transformer, which is a BERT-like Transformer encoder that maps a set of "query tokens" to query embeddings, which bridge the gap between the embedding space of the image encoder and the large language model.

The goal for the model is simply to predict the next text token, given the query embeddings and the previous text.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/blip2_architecture.jpg" alt="drawing" width="600"/>

This allows the model to be used for tasks like:

- image captioning
- visual question answering (VQA)
- chat-like conversations by feeding the image and the previous conversation as prompt to the model

## Direct Use and Downstream Use

You can use the raw model for conditional text generation given an image and optional text. See the [model hub](https://huggingface.co/models?search=Salesforce/blip) to look for fine-tuned versions on a task that interests you.

## Bias, Risks, Limitations, and Ethical Considerations

BLIP2-FlanT5 uses off-the-shelf Flan-T5 as the language model. It inherits the same risks and limitations from [Flan-T5](https://arxiv.org/pdf/2210.11416.pdf):

> Language models, including Flan-T5, can potentially be used for language generation in a harmful way, according to Rae et al. (2021). Flan-T5 should not be used directly in any application, without a prior assessment of safety and fairness concerns specific to the application.

BLIP2 is fine-tuned on image-text datasets (e.g. [LAION](https://laion.ai/blog/laion-400-open-dataset/)) collected from the internet. As a result the model itself is potentially vulnerable to generating equivalently inappropriate content or replicating inherent biases in the underlying data.

BLIP2 has not been tested in real world applications. It should not be directly deployed in any applications. Researchers should first carefully assess the safety and fairness of the model in relation to the specific context they’re being deployed within.

### How to use

For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/blip-2#transformers.Blip2ForConditionalGeneration.forward.example).
#### Running the model on CPU

<details>
<summary> Click to expand </summary>

```python
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt")

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>

#### Running the model on GPU

##### In full precision

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", device_map="auto")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt").to("cuda")

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>

##### In half precision (`float16`)

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate
import torch
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16, device_map="auto")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>

##### In 8-bit precision (`int8`)

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate bitsandbytes
import torch
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", load_in_8bit=True, device_map="auto")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>
latent-consistency/lcm-lora-sdxl
latent-consistency
"2023-11-24T13:31:08Z"
109,736
652
diffusers
[ "diffusers", "lora", "text-to-image", "arxiv:2311.05556", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "has_space", "region:us" ]
text-to-image
"2023-11-09T00:34:02Z"
---
library_name: diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
tags:
- lora
- text-to-image
license: openrail++
inference: false
---

# Latent Consistency Model (LCM) LoRA: SDXL

Latent Consistency Model (LCM) LoRA was proposed in [LCM-LoRA: A universal Stable-Diffusion Acceleration Module](https://arxiv.org/abs/2311.05556) by *Simian Luo, Yiqin Tan, Suraj Patil, Daniel Gu et al.*

It is a distilled consistency adapter for [`stable-diffusion-xl-base-1.0`](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) that reduces the number of inference steps to only **2 - 8 steps**.

| Model | Params / M |
|----------------------------------------------------------------------------|------------|
| [lcm-lora-sdv1-5](https://huggingface.co/latent-consistency/lcm-lora-sdv1-5) | 67.5 |
| [lcm-lora-ssd-1b](https://huggingface.co/latent-consistency/lcm-lora-ssd-1b) | 105 |
| [**lcm-lora-sdxl**](https://huggingface.co/latent-consistency/lcm-lora-sdxl) | **197** |

## Usage

LCM-LoRA is supported in the 🤗 Hugging Face Diffusers library from version v0.23.0 onwards. To run the model, first install the latest version of the Diffusers library as well as `peft`, `accelerate` and `transformers`:

```bash
pip install --upgrade pip
pip install --upgrade diffusers transformers accelerate peft
```

***Note: For detailed usage examples we recommend you to check out our official [LCM-LoRA docs](https://huggingface.co/docs/diffusers/main/en/using-diffusers/inference_with_lcm_lora)***

### Text-to-Image

The adapter can be loaded with its base model `stabilityai/stable-diffusion-xl-base-1.0`. Next, the scheduler needs to be changed to [`LCMScheduler`](https://huggingface.co/docs/diffusers/v0.22.3/en/api/schedulers/lcm#diffusers.LCMScheduler) and we can reduce the number of inference steps to just 2 to 8 steps. Please make sure to either disable `guidance_scale` or use values between 1.0 and 2.0.

```python
import torch
from diffusers import LCMScheduler, AutoPipelineForText2Image

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
adapter_id = "latent-consistency/lcm-lora-sdxl"

pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# load and fuse lcm lora
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()

prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"

# disable guidance_scale by passing 0
image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0).images[0]
```

![](./image.png)

### Inpainting

LCM-LoRA can be used for inpainting as well.
```python
import torch
from diffusers import AutoPipelineForInpainting, LCMScheduler
from diffusers.utils import load_image, make_image_grid

pipe = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()

# load base and mask image
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").resize((1024, 1024))
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").resize((1024, 1024))

prompt = "a castle on top of a mountain, highly detailed, 8k"
generator = torch.manual_seed(42)
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    generator=generator,
    num_inference_steps=5,
    guidance_scale=4,
).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_inpainting.png)

## Combine with styled LoRAs

LCM-LoRA can be combined with other LoRAs to generate styled images in very few steps (4-8). In the following example, we'll use the LCM-LoRA with the [papercut LoRA](https://huggingface.co/TheLastBen/Papercut_SDXL). To learn more about how to combine LoRAs, refer to [this guide](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference#combine-multiple-adapters).

```python
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    torch_dtype=torch.float16
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LoRAs
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")

# Combine LoRAs
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])

prompt = "papercut, a cute fox"
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0]
image
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdx_lora_mix.png)

### ControlNet

```python
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, LCMScheduler
from diffusers.utils import load_image, make_image_grid

image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((1024, 1024))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0-small", torch_dtype=torch.float16, variant="fp16")
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
    variant="fp16"
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()

generator = torch.manual_seed(0)
image = pipe(
    "picture of the mona lisa",
    image=canny_image,
    num_inference_steps=5,
    guidance_scale=1.5,
    controlnet_conditioning_scale=0.5,
    cross_attention_kwargs={"scale": 1},
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_controlnet.png)

<Tip>
The inference parameters in this example might not work for all examples, so we recommend you to try different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale` and `cross_attention_kwargs` parameters and choose the best one.
</Tip>

### T2I Adapter

This example shows how to use the LCM-LoRA with the [Canny T2I-Adapter](https://huggingface.co/TencentARC/t2i-adapter-canny-sdxl-1.0) and SDXL.

```python
import torch
import cv2
import numpy as np
from PIL import Image

from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, LCMScheduler
from diffusers.utils import load_image, make_image_grid

# Prepare image
# Detect the canny map in low resolution to avoid high-frequency details
image = load_image(
    "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg"
).resize((384, 384))

image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image).resize((1024, 1024))

# load adapter
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, variant="fp16").to("cuda")

pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    adapter=adapter,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# set scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# load LCM-LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "Mystical fairy in real, magic, 4k picture, high quality"
negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=1.5,
    adapter_conditioning_scale=0.8,
    adapter_conditioning_factor=1,
    generator=generator,
).images[0]
make_image_grid([canny_image, image], rows=1, cols=2)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_t2iadapter.png)

## Speed Benchmark

TODO

## Training

TODO
tiiuae/falcon-7b
tiiuae
"2023-09-29T14:32:19Z"
109,277
1,023
transformers
[ "transformers", "pytorch", "falcon", "text-generation", "custom_code", "en", "dataset:tiiuae/falcon-refinedweb", "arxiv:2205.14135", "arxiv:1911.02150", "arxiv:2101.00027", "arxiv:2005.14165", "arxiv:2104.09864", "arxiv:2306.01116", "license:apache-2.0", "autotrain_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-04-24T16:36:24Z"
---
datasets:
- tiiuae/falcon-refinedweb
language:
- en
inference: false
license: apache-2.0
---

# 🚀 Falcon-7B

**Falcon-7B is a 7B parameters causal decoder-only model built by [TII](https://www.tii.ae) and trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. It is made available under the Apache 2.0 license.**

*Paper coming soon* 😊.

🤗 To get started with Falcon (inference, finetuning, quantization, etc.), we recommend reading [this great blogpost from HF](https://huggingface.co/blog/falcon)!

## Why use Falcon-7B?

* **It outperforms comparable open-source models** (e.g., [MPT-7B](https://huggingface.co/mosaicml/mpt-7b), [StableLM](https://github.com/Stability-AI/StableLM), [RedPajama](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1) etc.), thanks to being trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
* **It features an architecture optimized for inference**, with FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)) and multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)).
* **It is made available under a permissive Apache 2.0 license allowing for commercial use**, without any royalties or restrictions.

⚠️ **This is a raw, pretrained model, which should be further finetuned for most usecases.** If you are looking for a version better suited to taking generic instructions in a chat format, we recommend taking a look at [Falcon-7B-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct).

🔥 **Looking for an even more powerful model?** [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b) is Falcon-7B's big brother!

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch

model = "tiiuae/falcon-7b"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
sequences = pipeline(
    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
    max_length=200,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```

💥 **Falcon LLMs require PyTorch 2.0 for use with `transformers`!**

For fast inference with Falcon, check out [Text Generation Inference](https://github.com/huggingface/text-generation-inference)! Read more in this [blogpost](https://huggingface.co/blog/falcon).

You will need **at least 16GB of memory** to swiftly run inference with Falcon-7B.

# Model Card for Falcon-7B

## Model Details

### Model Description

- **Developed by:** [https://www.tii.ae](https://www.tii.ae);
- **Model type:** Causal decoder-only;
- **Language(s) (NLP):** English, German, Spanish, French (and limited capabilities in Italian, Portuguese, Polish, Dutch, Romanian, Czech, Swedish);
- **License:** Apache 2.0.

### Model Source

- **Paper:** *coming soon*.
## Uses

### Direct Use

Research on large language models; as a foundation for further specialization and finetuning for specific usecases (e.g., summarization, text generation, chatbot, etc.)

### Out-of-Scope Use

Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful.

## Bias, Risks, and Limitations

Falcon-7B is trained on English and French data only, and will not generalize appropriately to other languages. Furthermore, as it is trained on a large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online.

### Recommendations

We recommend users of Falcon-7B to consider finetuning it for the specific set of tasks of interest, and for guardrails and appropriate precautions to be taken for any production use.

## How to Get Started with the Model

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch

model = "tiiuae/falcon-7b"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
sequences = pipeline(
    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
    max_length=200,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```

## Training Details

### Training Data

Falcon-7B was trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), a high-quality filtered and deduplicated web dataset which we enhanced with curated corpora. Significant components from our curated corpora were inspired by The Pile ([Gao et al., 2020](https://arxiv.org/abs/2101.00027)).

| **Data source** | **Fraction** | **Tokens** | **Sources** |
|--------------------|--------------|------------|-----------------------------------|
| [RefinedWeb-English](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) | 79% | 1,185B | massive web crawl |
| Books | 7% | 110B | |
| Conversations | 6% | 85B | Reddit, StackOverflow, HackerNews |
| Code | 3% | 45B | |
| RefinedWeb-French | 3% | 45B | massive web crawl |
| Technical | 2% | 30B | arXiv, PubMed, USPTO, etc. |

The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) tokenizer.

### Training Procedure

Falcon-7B was trained on 384 A100 40GB GPUs, using a 2D parallelism strategy (PP=2, DP=192) combined with ZeRO.

#### Training Hyperparameters

| **Hyperparameter** | **Value** | **Comment** |
|--------------------|------------|-------------------------------------------|
| Precision | `bfloat16` | |
| Optimizer | AdamW | |
| Learning rate | 6e-4 | 4B tokens warm-up, cosine decay to 1.2e-5 |
| Weight decay | 1e-1 | |
| Z-loss | 1e-4 | |
| Batch size | 2304 | 30B tokens ramp-up |

#### Speeds, Sizes, Times

Training happened in early March 2023 and took about two weeks.

## Evaluation

*Paper coming soon*.

See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for early results.
## Technical Specifications

### Model Architecture and Objective

Falcon-7B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token).

The architecture is broadly adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), with the following differences:

* **Positional embeddings:** rotary ([Su et al., 2021](https://arxiv.org/abs/2104.09864));
* **Attention:** multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135));
* **Decoder-block:** parallel attention/MLP with a single layer norm.

| **Hyperparameter** | **Value** | **Comment** |
|--------------------|-----------|----------------------------------------|
| Layers | 32 | |
| `d_model` | 4544 | Increased to compensate for multiquery |
| `head_dim` | 64 | Reduced to optimise for FlashAttention |
| Vocabulary | 65024 | |
| Sequence length | 2048 | |

### Compute Infrastructure

#### Hardware

Falcon-7B was trained on AWS SageMaker, on 384 A100 40GB GPUs in P4d instances.

#### Software

Falcon-7B was trained on a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.)

## Citation

*Paper coming soon* 😊. In the meanwhile, you can use the following information to cite:

```
@article{falcon40b,
  title={{Falcon-40B}: an open large language model with state-of-the-art performance},
  author={Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme},
  year={2023}
}
```

To learn more about the pretraining dataset, see the 📓 [RefinedWeb paper](https://arxiv.org/abs/2306.01116).

```
@article{refinedweb,
  title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only},
  author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay},
  journal={arXiv preprint arXiv:2306.01116},
  eprint={2306.01116},
  eprinttype = {arXiv},
  url={https://arxiv.org/abs/2306.01116},
  year={2023}
}
```

## License

Falcon-7B is made available under the Apache 2.0 license.

## Contact

falconllm@tii.ae
microsoft/dit-base
microsoft
"2023-02-27T17:55:38Z"
109,222
16
transformers
[ "transformers", "pytorch", "beit", "dit", "arxiv:2203.02378", "region:us" ]
null
"2022-03-07T17:18:46Z"
--- tags: - dit inference: false --- # Document Image Transformer (base-sized model) Document Image Transformer (DiT) model pre-trained on IIT-CDIP (Lewis et al., 2006), a dataset that includes 42 million document images. It was introduced in the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Li et al. and first released in [this repository](https://github.com/microsoft/unilm/tree/master/dit). Note that DiT is identical to the architecture of [BEiT](https://huggingface.co/docs/transformers/model_doc/beit). Disclaimer: The team releasing DiT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Document Image Transformer (DiT) is a transformer encoder model (BERT-like) pre-trained on a large collection of images in a self-supervised fashion. The pre-training objective for the model is to predict visual tokens from the encoder of a discrete VAE (dVAE), based on masked patches. Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder. By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled document images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. ## Intended uses & limitations You can use the raw model for encoding document images into a vector space, but it's mostly meant to be fine-tuned on tasks like document image classification, table detection or document layout analysis. See the [model hub](https://huggingface.co/models?search=microsoft/dit) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model in PyTorch: ```python from transformers import BeitImageProcessor, BeitForMaskedImageModeling import torch from PIL import Image image = Image.open('path_to_your_document_image').convert('RGB') processor = BeitImageProcessor.from_pretrained("microsoft/dit-base") model = BeitForMaskedImageModeling.from_pretrained("microsoft/dit-base") num_patches = (model.config.image_size // model.config.patch_size) ** 2 pixel_values = processor(images=image, return_tensors="pt").pixel_values # create random boolean mask of shape (batch_size, num_patches) bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) loss, logits = outputs.loss, outputs.logits ``` ### BibTeX entry and citation info ```bibtex @article{Lewis2006BuildingAT, title={Building a test collection for complex document information processing}, author={David D. Lewis and Gady Agam and Shlomo Engelson Argamon and Ophir Frieder and David A. Grossman and Jefferson Heard}, journal={Proceedings of the 29th annual international ACM SIGIR conference on Research and development in information retrieval}, year={2006} } ```
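Since the intended use above mentions encoding document images into a vector space, here is a hedged sketch of plain feature extraction with `BeitModel` (DiT shares the BEiT architecture); pooling the [CLS] token is an illustrative choice rather than a documented recommendation:

```python
import torch
from transformers import BeitImageProcessor, BeitModel
from PIL import Image

image = Image.open('path_to_your_document_image').convert('RGB')

processor = BeitImageProcessor.from_pretrained("microsoft/dit-base")
model = BeitModel.from_pretrained("microsoft/dit-base")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# final hidden state of the [CLS] token as a document embedding
embedding = outputs.last_hidden_state[:, 0]
print(embedding.shape)  # (1, hidden_size)
```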
facebook/dino-vitb16
facebook
"2023-05-22T07:04:00Z"
108,631
93
transformers
[ "transformers", "pytorch", "tf", "vit", "image-feature-extraction", "dino", "vision", "dataset:imagenet-1k", "arxiv:2104.14294", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
image-feature-extraction
"2022-03-02T23:29:05Z"
--- license: apache-2.0 tags: - dino - vision datasets: - imagenet-1k --- # Vision Transformer (base-sized model, patch size 16) trained using DINO Vision Transformer (ViT) model trained using the DINO method. It was introduced in the paper [Emerging Properties in Self-Supervised Vision Transformers](https://arxiv.org/abs/2104.14294) by Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, Armand Joulin and first released in [this repository](https://github.com/facebookresearch/dino). Disclaimer: The team releasing DINO did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion, namely ImageNet-1k, at a resolution of 224x224 pixels. Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. One also adds a [CLS] token to the beginning of a sequence to use it for classification tasks. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder. Note that this model does not include any fine-tuned heads. By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of an entire image. ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=google/vit) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model: ```python from transformers import ViTImageProcessor, ViTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = ViTImageProcessor.from_pretrained('facebook/dino-vitb16') model = ViTModel.from_pretrained('facebook/dino-vitb16') inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2104-14294, author = {Mathilde Caron and Hugo Touvron and Ishan Misra and Herv{\'{e}} J{\'{e}}gou and Julien Mairal and Piotr Bojanowski and Armand Joulin}, title = {Emerging Properties in Self-Supervised Vision Transformers}, journal = {CoRR}, volume = {abs/2104.14294}, year = {2021}, url = {https://arxiv.org/abs/2104.14294}, archivePrefix = {arXiv}, eprint = {2104.14294}, timestamp = {Tue, 04 May 2021 15:12:43 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2104-14294.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
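As a sketch of the linear-probe setup described above (a linear layer on top of the [CLS] token), the snippet below illustrates the idea; the 10-class head is hypothetical and would have to be trained on your own labels, since this checkpoint ships no fine-tuned head:

```python
import torch
from transformers import ViTImageProcessor, ViTModel
from PIL import Image
import requests

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = ViTImageProcessor.from_pretrained('facebook/dino-vitb16')
model = ViTModel.from_pretrained('facebook/dino-vitb16')

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

cls_embedding = outputs.last_hidden_state[:, 0]  # [CLS] token representation

# hypothetical linear head; train it on your labeled data
classifier = torch.nn.Linear(model.config.hidden_size, 10)
logits = classifier(cls_embedding)
```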
stabilityai/stable-video-diffusion-img2vid-xt-1-1
stabilityai
"2024-04-12T08:45:37Z"
108,312
468
diffusers
[ "diffusers", "safetensors", "image-to-video", "license:other", "has_space", "diffusers:StableVideoDiffusionPipeline", "region:us" ]
image-to-video
"2024-02-02T15:40:53Z"
--- pipeline_tag: image-to-video license: other license_name: stable-video-diffusion-1-1-nc-community license_link: LICENSE extra_gated_prompt: >- STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT Dated: February 2, 2024 By clicking “I Accept” below or by using or distributing any portion or element of the Models, Software, Software Products or Derivative Works, you agree to the terms of this License. If you do not agree to this License, then you do not have any rights to use the Software Products or Derivative Works through this License, and you must immediately cease using the Software Products or Derivative Works. If you are agreeing to be bound by the terms of this License on behalf of your employer or other entity, you represent and warrant to Stability AI that you have full legal authority to bind your employer or such entity to this License. If you do not have the requisite authority, you may not accept the License or access the Software Products or Derivative Works on behalf of your employer or other entity. "Agreement" means this Stable Non-Commercial Research Community License Agreement. “AUP” means the Stability AI Acceptable Use Policy available at https://stability.ai/use-policy, as may be updated from time to time. "Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model. “Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. “Model(s)" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement. “Non-Commercial Uses” means exercising any of the rights granted herein for the purpose of research or non-commercial purposes. Non-Commercial Uses does not include any production use of the Software Products or any Derivative Works. "Stability AI" or "we" means Stability AI Ltd. and its affiliates. "Software" means Stability AI’s proprietary software made available under this Agreement. “Software Products” means the Models, Software and Documentation, individually or in any combination. 1. License Rights and Redistribution. a. Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned or controlled by Stability AI embodied in the Software Products to use, reproduce, distribute, and create Derivative Works of, the Software Products, in each case for Non-Commercial Uses only. b. 
You may not use the Software Products or Derivative Works to enable third parties to use the Software Products or Derivative Works as part of your hosted service or via your APIs, whether you are adding substantial additional functionality thereto or not. Merely distributing the Software Products or Derivative Works for download online without offering any related service (ex. by distributing the Models on HuggingFace) is not a violation of this subsection. If you wish to use the Software Products or any Derivative Works for commercial or production use or you wish to make the Software Products or any Derivative Works available to third parties via your hosted service or your APIs, contact Stability AI at https://stability.ai/contact. c. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Non-Commercial Research Community License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified. 2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS. 3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 4. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Software Products or Derivative Works, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products or Derivative Works. b. Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works c. 
If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products or Derivative Works in violation of this Agreement. 5. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of any Software Products or Derivative Works. Sections 2-4 shall survive the termination of this Agreement. 6. Governing Law. This Agreement will be governed by and construed in accordance with the laws of the United States and the State of California without regard to choice of law principles. extra_gated_description: Stable Video Diffusion 1.1 License Agreement extra_gated_button_content: Submit extra_gated_fields: Name: text Company Name (if applicable): text Email: text Other Comments: text By clicking here, you accept the License agreement, and will use the Software Products and Derivative Works for non-commercial or research purposes only: checkbox By clicking here, you agree to sharing with Stability AI the information contained within this form and that Stability AI can contact you for the purposes of marketing our products and services: checkbox --- # Stable Video Diffusion 1.1 Image-to-Video Model Card <!-- Provide a quick summary of what the model is/does. --> ![row01](svd11.webp) Stable Video Diffusion (SVD) 1.1 Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it. Please note: For commercial use, please refer to https://stability.ai/membership. ## Model Details ### Model Description (SVD 1.1) Image-to-Video is a latent diffusion model trained to generate short video clips from an image conditioning. This model was trained to generate 25 frames at resolution 1024x576 given a context frame of the same size, finetuned from [SVD Image-to-Video [25 frames]](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt). Fine tuning was performed with fixed conditioning at 6FPS and Motion Bucket Id 127 to improve the consistency of outputs without the need to adjust hyper parameters. These conditions are still adjustable and have not been removed. Performance outside of the fixed conditioning settings may vary compared to SVD 1.0. - **Developed by:** Stability AI - **Funded by:** Stability AI - **Model type:** Generative image-to-video model - **Finetuned from model:** SVD Image-to-Video [25 frames] ### Model Sources For research purposes, we recommend our `generative-models` Github repository (https://github.com/Stability-AI/generative-models), which implements the most popular diffusion frameworks (both training and inference). 
- **Repository:** https://github.com/Stability-AI/generative-models - **Paper:** https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets ## Uses ### Direct Use The model is intended for both non-commercial and commercial usage. You can use this model for non-commercial or research purposes under the following [license](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1/blob/main/LICENSE). Possible research areas and tasks include - Research on generative models. - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. For commercial use, please refer to https://stability.ai/membership. Excluded uses are described below. ### Out-of-Scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. The model should not be used in any way that violates Stability AI's [Acceptable Use Policy](https://stability.ai/use-policy). ## Limitations and Bias ### Limitations - The generated videos are rather short (<= 4sec), and the model does not achieve perfect photorealism. - The model may generate videos without motion, or very slow camera pans. - The model cannot be controlled through text. - The model cannot render legible text. - Faces and people in general may not be generated properly. - The autoencoding part of the model is lossy. ### Recommendations The model is intended for both non-commercial and commercial usage. ## How to Get Started with the Model Check out https://github.com/Stability-AI/generative-models
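The tags on this repository indicate support for diffusers' `StableVideoDiffusionPipeline`, so inference along the following lines should work. Treat it as a hedged sketch (the conditioning image path and parameters such as `decode_chunk_size` and `fps` are illustrative); the `generative-models` repository above remains the reference implementation:

```python
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16"
)
pipe.to("cuda")

# conditioning frame; the model was trained at 1024x576
image = load_image("path_or_url_to_your_conditioning_image.png").resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]

export_to_video(frames, "generated.mp4", fps=7)
```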
TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ
TheBloke
"2023-12-14T14:30:42Z"
108,147
49
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "conversational", "fr", "it", "de", "es", "en", "base_model:mistralai/Mixtral-8x7B-Instruct-v0.1", "license:apache-2.0", "autotrain_compatible", "has_space", "text-generation-inference", "4-bit", "region:us" ]
text-generation
"2023-12-11T21:01:33Z"
--- base_model: mistralai/Mixtral-8x7B-Instruct-v0.1 inference: false language: - fr - it - de - es - en license: apache-2.0 model_creator: Mistral AI_ model_name: Mixtral 8X7B Instruct v0.1 model_type: mixtral prompt_template: '[INST] {prompt} [/INST] ' quantized_by: TheBloke widget: - output: text: 'Arr, shiver me timbers! Ye have a llama on yer lawn, ye say? Well, that be a new one for me! Here''s what I''d suggest, arr: 1. Firstly, ensure yer safety. Llamas may look gentle, but they can be protective if they feel threatened. 2. Try to make the area less appealing to the llama. Remove any food sources or water that might be attracting it. 3. Contact local animal control or a wildlife rescue organization. They be the experts and can provide humane ways to remove the llama from yer property. 4. If ye have any experience with animals, you could try to gently herd the llama towards a nearby field or open space. But be careful, arr! Remember, arr, it be important to treat the llama with respect and care. It be a creature just trying to survive, like the rest of us.' text: '[INST] You are a pirate chatbot who always responds with Arr and pirate speak! There''s a llama on my lawn, how can I get rid of him? [/INST]' --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Mixtral 8X7B Instruct v0.1 - AWQ - Model creator: [Mistral AI_](https://huggingface.co/mistralai) - Original model: [Mixtral 8X7B Instruct v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) <!-- description start --> ## Description This repo contains AWQ model files for [Mistral AI_'s Mixtral 8X7B Instruct v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1). ### About AWQ AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference with equivalent or better quality compared to the most commonly used GPTQ settings. AWQ models are currently supported on Linux and Windows, with NVidia GPUs only. macOS users: please use GGUF models instead. It is supported by: - [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) - using Loader: AutoAWQ - [vLLM](https://github.com/vllm-project/vllm) - version 0.2.2 or later for support for all model types. 
- [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) - [Transformers](https://huggingface.co/docs/transformers) version 4.35.0 and later, from any code or client that supports Transformers - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - for use from Python code <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF) * [Mistral AI_'s original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Mistral ``` [INST] {prompt} [/INST] ``` <!-- prompt-template end --> <!-- README_AWQ.md-provided-files start --> ## Provided files, and AWQ parameters I currently release 128g GEMM models only. The addition of group_size 32 models, and GEMV kernel models, is being actively considered. Models are released as sharded safetensors files. | Branch | Bits | GS | AWQ Dataset | Seq Len | Size | | ------ | ---- | -- | ----------- | ------- | ---- | | main | 4 | 128 | [VMware Open Instruct](https://huggingface.co/datasets/VMware/open-instruct/viewer/) | 8192 | 24.65 GB <!-- README_AWQ.md-provided-files end --> <!-- README_AWQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui) Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ`. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `Mixtral-8x7B-Instruct-v0.1-AWQ` 7. Select **Loader: AutoAWQ**. 8. Click Load, and the model will load and is now ready for use. 9. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. 10. Once you're ready, click the **Text Generation** tab and enter a prompt to get started! <!-- README_AWQ.md-text-generation-webui end --> <!-- README_AWQ.md-use-from-vllm start --> ## Multi-user inference server: vLLM Documentation on installing and using vLLM [can be found here](https://vllm.readthedocs.io/en/latest/). - Please ensure you are using vLLM version 0.2 or later. - When using vLLM as a server, pass the `--quantization awq` parameter. For example: ```shell python3 -m vllm.entrypoints.api_server --model TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ --quantization awq --dtype auto ``` - When using vLLM from Python code, again set `quantization=awq`. 
For example:

```python
from vllm import LLM, SamplingParams

prompts = [
    "Tell me about AI",
    "Write a story about llamas",
    "What is 291 - 150?",
    "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
]
prompt_template = '''[INST] {prompt} [/INST]
'''

prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]

sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

llm = LLM(model="TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ", quantization="awq", dtype="auto")

outputs = llm.generate(prompts, sampling_params)

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```
<!-- README_AWQ.md-use-from-vllm end -->

<!-- README_AWQ.md-use-from-tgi start -->
## Multi-user inference server: Hugging Face Text Generation Inference (TGI)

Use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0`

Example Docker parameters:

```shell
--model-id TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ --port 3000 --quantize awq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096
```

Example Python code for interfacing with TGI (requires [huggingface-hub](https://github.com/huggingface/huggingface_hub) 0.17.0 or later):

```shell
pip3 install huggingface-hub
```

```python
from huggingface_hub import InferenceClient

endpoint_url = "https://your-endpoint-url-here"

prompt = "Tell me about AI"
prompt_template = f'''[INST] {prompt} [/INST]
'''

client = InferenceClient(endpoint_url)
response = client.text_generation(prompt,
                                  max_new_tokens=128,
                                  do_sample=True,
                                  temperature=0.7,
                                  top_p=0.95,
                                  top_k=40,
                                  repetition_penalty=1.1)

print(f"Model output: ", response)
```
<!-- README_AWQ.md-use-from-tgi end -->

<!-- README_AWQ.md-use-from-python start -->
## Inference from Python code using Transformers

### Install the necessary packages

- Requires: [Transformers](https://huggingface.co/docs/transformers) 4.35.0 or later.
- Requires: [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) 0.1.6 or later.

```shell
pip3 install --upgrade "autoawq>=0.1.6" "transformers>=4.35.0"
```

Note that if you are using PyTorch 2.0.1, the above AutoAWQ command will automatically upgrade you to PyTorch 2.1.0.

If you are using CUDA 11.8 and wish to continue using PyTorch 2.0.1, instead run this command:

```shell
pip3 install https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.6/autoawq-0.1.6+cu118-cp310-cp310-linux_x86_64.whl
```

If you have problems installing [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) using the pre-built wheels, install it from source instead:

```shell
pip3 uninstall -y autoawq
git clone https://github.com/casper-hansen/AutoAWQ
cd AutoAWQ
pip3 install .
```

### Transformers example code (requires Transformers 4.35.0 and later)

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

model_name_or_path = "TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    low_cpu_mem_usage=True,
    device_map="cuda:0"
)

# Using the text streamer to stream output one token at a time
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

prompt = "Tell me about AI"
prompt_template = f'''[INST] {prompt} [/INST]
'''

# Convert prompt to tokens
tokens = tokenizer(
    prompt_template,
    return_tensors='pt'
).input_ids.cuda()

generation_params = {
    "do_sample": True,
    "temperature": 0.7,
    "top_p": 0.95,
    "top_k": 40,
    "max_new_tokens": 512,
    "repetition_penalty": 1.1
}

# Generate streamed output, visible one token at a time
generation_output = model.generate(
    tokens,
    streamer=streamer,
    **generation_params
)

# Generation without a streamer, which will include the prompt in the output
generation_output = model.generate(
    tokens,
    **generation_params
)

# Get the tokens from the output, decode them, print them
token_output = generation_output[0]
text_output = tokenizer.decode(token_output)
print("model.generate output: ", text_output)

# Inference is also possible via Transformers' pipeline
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    **generation_params
)

pipe_output = pipe(prompt_template)[0]['generated_text']
print("pipeline output: ", pipe_output)
```
<!-- README_AWQ.md-use-from-python end -->

<!-- README_AWQ.md-compatibility start -->
## Compatibility

The files provided are tested to work with:

- [text-generation-webui](https://github.com/oobabooga/text-generation-webui) using `Loader: AutoAWQ`.
- [vLLM](https://github.com/vllm-project/vllm) version 0.2.0 and later.
- [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) version 1.1.0 and later.
- [Transformers](https://huggingface.co/docs/transformers) version 4.35.0 and later.
- [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) version 0.1.1 and later.
<!-- README_AWQ.md-compatibility end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.
**Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros

Thank you to all my generous patrons and donators!

And thank you again to a16z for their generous grant.

<!-- footer end -->

# Original model card: Mistral AI's Mixtral 8X7B Instruct v0.1

# Model Card for Mixtral-8x7B

The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms Llama 2 70B on most benchmarks we tested.

For full details of this model please read our [release blog post](https://mistral.ai/news/mixtral-of-experts/).

## Warning

This repo contains weights that are compatible with [vLLM](https://github.com/vllm-project/vllm) serving of the model as well as the Hugging Face [transformers](https://github.com/huggingface/transformers) library. It is based on the original Mixtral [torrent release](magnet:?xt=urn:btih:5546272da9065eddeb6fcd7ffddeef5b75be79a7&dn=mixtral-8x7b-32kseqlen&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce), but the file format and parameter names are different. Please note that the model cannot (yet) be instantiated with HF.

## Instruction format

This format must be strictly respected, otherwise the model will generate sub-optimal outputs.

The template used to build a prompt for the Instruct model is defined as follows:

```
<s> [INST] Instruction [/INST] Model answer</s> [INST] Follow-up instruction [/INST]
```

Note that `<s>` and `</s>` are special tokens for beginning of string (BOS) and end of string (EOS), while `[INST]` and `[/INST]` are regular strings.
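As a hedged illustration (not part of the original card), recent `transformers` tokenizers ship this template as a chat template, so the prompt can be built programmatically rather than by hand; exact whitespace may differ slightly from the sketch above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Placeholder conversation mirroring the template shown above.
messages = [
    {"role": "user", "content": "Instruction"},
    {"role": "assistant", "content": "Model answer"},
    {"role": "user", "content": "Follow-up instruction"},
]

# tokenize=False returns the formatted string so the template can be inspected.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```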
As reference, here is the pseudo-code used to tokenize instructions during fine-tuning:

```python
def tokenize(text):
    return tok.encode(text, add_special_tokens=False)

[BOS_ID] +
tokenize("[INST]") + tokenize(USER_MESSAGE_1) + tokenize("[/INST]") +
tokenize(BOT_MESSAGE_1) + [EOS_ID] +
…
tokenize("[INST]") + tokenize(USER_MESSAGE_N) + tokenize("[/INST]") +
tokenize(BOT_MESSAGE_N) + [EOS_ID]
```

In the pseudo-code above, note that the `tokenize` method should not add a BOS or EOS token automatically, but should add a prefix space.

## Run the model

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)

model = AutoModelForCausalLM.from_pretrained(model_id)

text = "Hello my name is"
inputs = tokenizer(text, return_tensors="pt")

outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

By default, transformers loads the model in full precision. You may therefore want to further reduce the memory requirements through the optimizations offered in the HF ecosystem:

### In half-precision

Note that `float16` precision only works on GPU devices.

<details>
<summary> Click to expand </summary>

```diff
+ import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)

+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to(0)

text = "Hello my name is"
+ inputs = tokenizer(text, return_tensors="pt").to(0)

outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
</details>

### Lower precision (8-bit & 4-bit) using `bitsandbytes`

<details>
<summary> Click to expand </summary>

```diff
+ import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)

+ model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True)

text = "Hello my name is"
+ inputs = tokenizer(text, return_tensors="pt").to(0)

outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
</details>

### Load the model with Flash Attention 2

<details>
<summary> Click to expand </summary>

```diff
+ import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)

+ model = AutoModelForCausalLM.from_pretrained(model_id, use_flash_attention_2=True)

text = "Hello my name is"
+ inputs = tokenizer(text, return_tensors="pt").to(0)

outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
</details>

## Limitations

The Mixtral-8x7B Instruct model is a quick demonstration that the base model can be easily fine-tuned to achieve compelling performance. It does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.
## The Mistral AI Team

Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Lélio Renard Lavaud, Louis Ternon, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Théophile Gervet, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
microsoft/table-transformer-structure-recognition-v1.1-all
microsoft
"2023-11-18T21:58:10Z"
108,006
29
transformers
[ "transformers", "safetensors", "table-transformer", "object-detection", "arxiv:2303.00716", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
object-detection
"2023-11-18T21:33:25Z"
---
license: mit
---

# Table Transformer (pre-trained for Table Structure Recognition)

Table Transformer (TATR) model trained on PubTables1M and FinTabNet.c. It was introduced in the paper [Aligning benchmark datasets for table structure recognition](https://arxiv.org/abs/2303.00716) by Smock et al. and first released in [this repository](https://github.com/microsoft/table-transformer).

Disclaimer: The team releasing Table Transformer did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

The Table Transformer is equivalent to [DETR](https://huggingface.co/docs/transformers/model_doc/detr), a Transformer-based object detection model. Note that the authors decided to use the "normalize before" setting of DETR, which means that layernorm is applied before self- and cross-attention.

## Usage

You can use the raw model for recognizing the structure (rows, columns, cells) of tables in documents. See the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/table-transformer) for more info.
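For illustration, a minimal inference sketch (not part of the original card; the image path and confidence threshold are assumptions):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

checkpoint = "microsoft/table-transformer-structure-recognition-v1.1-all"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = TableTransformerForObjectDetection.from_pretrained(checkpoint)

# "table.png" is a hypothetical crop of a single table.
image = Image.open("table.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Post-process raw logits/boxes into labelled detections (rows, columns, cells, ...).
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.6, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```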
Systran/faster-whisper-large-v3
Systran
"2023-11-23T09:41:12Z"
107,765
105
ctranslate2
[ "ctranslate2", "audio", "automatic-speech-recognition", "en", "zh", "de", "es", "ru", "ko", "fr", "ja", "pt", "tr", "pl", "ca", "nl", "ar", "sv", "it", "id", "hi", "fi", "vi", "he", "uk", "el", "ms", "cs", "ro", "da", "hu", "ta", "no", "th", "ur", "hr", "bg", "lt", "la", "mi", "ml", "cy", "sk", "te", "fa", "lv", "bn", "sr", "az", "sl", "kn", "et", "mk", "br", "eu", "is", "hy", "ne", "mn", "bs", "kk", "sq", "sw", "gl", "mr", "pa", "si", "km", "sn", "yo", "so", "af", "oc", "ka", "be", "tg", "sd", "gu", "am", "yi", "lo", "uz", "fo", "ht", "ps", "tk", "nn", "mt", "sa", "lb", "my", "bo", "tl", "mg", "as", "tt", "haw", "ln", "ha", "ba", "jw", "su", "yue", "license:mit", "region:us", "has_space" ]
automatic-speech-recognition
"2023-11-23T09:34:20Z"
---
language:
- en
- zh
- de
- es
- ru
- ko
- fr
- ja
- pt
- tr
- pl
- ca
- nl
- ar
- sv
- it
- id
- hi
- fi
- vi
- he
- uk
- el
- ms
- cs
- ro
- da
- hu
- ta
- 'no'
- th
- ur
- hr
- bg
- lt
- la
- mi
- ml
- cy
- sk
- te
- fa
- lv
- bn
- sr
- az
- sl
- kn
- et
- mk
- br
- eu
- is
- hy
- ne
- mn
- bs
- kk
- sq
- sw
- gl
- mr
- pa
- si
- km
- sn
- yo
- so
- af
- oc
- ka
- be
- tg
- sd
- gu
- am
- yi
- lo
- uz
- fo
- ht
- ps
- tk
- nn
- mt
- sa
- lb
- my
- bo
- tl
- mg
- as
- tt
- haw
- ln
- ha
- ba
- jw
- su
- yue
tags:
- audio
- automatic-speech-recognition
license: mit
library_name: ctranslate2
---

# Whisper large-v3 model for CTranslate2

This repository contains the conversion of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format.

This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/systran/faster-whisper).

## Example

```python
from faster_whisper import WhisperModel

model = WhisperModel("large-v3")

segments, info = model.transcribe("audio.mp3")
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
```

## Conversion details

The original model was converted with the following command:

```
ct2-transformers-converter --model openai/whisper-large-v3 --output_dir faster-whisper-large-v3 \
    --copy_files tokenizer.json preprocessor_config.json --quantization float16
```

Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html).

## More information

**For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-large-v3).**
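As a hedged follow-up to the `compute_type` note above (the device and compute-type choices here are illustrative assumptions, not part of the original card):

```python
from faster_whisper import WhisperModel

# Load the FP16 checkpoint but run inference with mixed int8/float16
# quantization to reduce memory use; assumes a CUDA device is available.
model = WhisperModel("large-v3", device="cuda", compute_type="int8_float16")

segments, info = model.transcribe("audio.mp3", beam_size=5)
print("Detected language '%s' (probability %.2f)" % (info.language, info.language_probability))
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
```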
shibing624/text2vec-base-multilingual
shibing624
"2024-02-19T08:20:25Z"
107,701
35
sentence-transformers
[ "sentence-transformers", "pytorch", "safetensors", "bert", "feature-extraction", "sentence-similarity", "transformers", "text2vec", "mteb", "zh", "en", "de", "fr", "it", "nl", "pt", "pl", "ru", "dataset:shibing624/nli-zh-all", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2023-06-22T06:28:12Z"
--- pipeline_tag: sentence-similarity license: apache-2.0 library_name: sentence-transformers tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - text2vec - mteb datasets: - shibing624/nli-zh-all language: - zh - en - de - fr - it - nl - pt - pl - ru metrics: - spearmanr model-index: - name: text2vec-base-multilingual results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 70.97014925373134 - type: ap value: 33.95151328318672 - type: f1 value: 65.14740155705596 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (de) config: de split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 68.69379014989293 - type: ap value: 79.68277579733802 - type: f1 value: 66.54960052336921 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en-ext) config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 70.90704647676162 - type: ap value: 20.747518928580437 - type: f1 value: 58.64365465884924 - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (ja) config: ja split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 61.605995717344754 - type: ap value: 14.135974879487028 - type: f1 value: 49.980224800472136 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 66.103375 - type: ap value: 61.10087197664471 - type: f1 value: 65.75198509894145 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 33.134 - type: f1 value: 32.7905397597083 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (de) config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 33.388 - type: f1 value: 33.190561196873084 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (es) config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 34.824 - type: f1 value: 34.297290157740726 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (fr) config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 33.449999999999996 - type: f1 value: 33.08017234412433 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (ja) config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 30.046 - type: f1 value: 29.857141661482228 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (zh) config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 32.522 - type: f1 value: 31.854699911472174 - 
task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 32.31918856561886 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 25.503481615956137 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 57.91471462820568 - type: mrr value: 71.82990370663501 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 68.83853315193127 - type: cos_sim_spearman value: 66.16174850417771 - type: euclidean_pearson value: 56.65313897263153 - type: euclidean_spearman value: 52.69156205876939 - type: manhattan_pearson value: 56.97282154658304 - type: manhattan_spearman value: 53.167476517261015 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 78.08441558441558 - type: f1 value: 77.99825264827898 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 28.98583420521256 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 23.195091778460892 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 43.35 - type: f1 value: 38.80269436557695 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 59.348 - type: ap value: 55.75065220262251 - type: f1 value: 58.72117519082607 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 81.04879160966712 - type: f1 value: 80.86889779192701 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (de) config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 78.59397013243168 - type: f1 value: 77.09902761555972 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (es) config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 79.24282855236824 - type: f1 value: 78.75883867079015 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (fr) config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 76.16661446915127 - type: f1 value: 76.30204722831901 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB 
MTOPDomainClassification (hi) config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 78.74506991753317 - type: f1 value: 77.50560442779701 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (th) config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 77.67088607594937 - type: f1 value: 77.21442956887493 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 62.786137710898316 - type: f1 value: 46.23474201126368 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (de) config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 55.285996055226825 - type: f1 value: 37.98039513682919 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (es) config: es split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 58.67911941294196 - type: f1 value: 40.541410807124954 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (fr) config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 53.257124960851854 - type: f1 value: 38.42982319259366 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (hi) config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 59.62352097525995 - type: f1 value: 41.28886486568534 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (th) config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 58.799276672694404 - type: f1 value: 43.68379466247341 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (af) config: af split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 45.42030934767989 - type: f1 value: 44.12201543566376 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (am) config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 37.67652992602556 - type: f1 value: 35.422091900843164 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ar) config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 45.02353732347007 - type: f1 value: 41.852484084738194 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (az) config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 48.70880968392737 - type: f1 value: 46.904360615435046 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (bn) config: bn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 43.78950907868191 - type: f1 value: 41.58872353920405 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (cy) config: cy split: 
test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 28.759246805648957 - type: f1 value: 27.41182001374226 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (da) config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.74176193678547 - type: f1 value: 53.82727354182497 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (de) config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.55682582380632 - type: f1 value: 49.41963627941866 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (el) config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.46940147948891 - type: f1 value: 55.28178711367465 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.83322125084063 - type: f1 value: 61.836172900845554 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (es) config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.27505043712172 - type: f1 value: 57.642436374361154 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fa) config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.05178211163417 - type: f1 value: 56.858998820504056 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fi) config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.357094821788834 - type: f1 value: 54.79711189260453 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fr) config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.79959650302623 - type: f1 value: 57.59158671719513 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (he) config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.1768661735037 - type: f1 value: 48.886397276270515 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hi) config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.06455951580362 - type: f1 value: 55.01530952684585 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hu) config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.3591123066577 - type: f1 value: 55.9277783370191 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (hy) config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 52.108271687962336 - type: f1 value: 51.195023400664596 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB 
MassiveIntentClassification (id) config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.26832548755883 - type: f1 value: 56.60774065423401 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (is) config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 35.806993947545394 - type: f1 value: 34.290418953173294 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (it) config: it split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.27841291190315 - type: f1 value: 56.9438998642419 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ja) config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.78009414929389 - type: f1 value: 59.15780842483667 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (jv) config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 31.153328850033624 - type: f1 value: 30.11004596099605 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ka) config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 44.50235373234701 - type: f1 value: 44.040585262624745 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (km) config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 40.99193006052455 - type: f1 value: 39.505480119272484 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (kn) config: kn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 46.95696032279758 - type: f1 value: 43.093638940785326 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ko) config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.73100201748486 - type: f1 value: 52.79750744404114 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (lv) config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.865501008742434 - type: f1 value: 53.64798408964839 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ml) config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 47.891728312037664 - type: f1 value: 45.261229414636055 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (mn) config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 52.2259583053127 - type: f1 value: 50.5903419246987 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ms) config: ms split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.277067921990586 - type: f1 value: 52.472042479965886 - task: type: Classification dataset: 
type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (my) config: my split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.95696032279757 - type: f1 value: 49.79330411854258 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (nb) config: nb split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.63685272360457 - type: f1 value: 52.81267480650003 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (nl) config: nl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.451916610625425 - type: f1 value: 57.34790386645091 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (pl) config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.91055817081372 - type: f1 value: 56.39195048528157 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (pt) config: pt split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.84196368527236 - type: f1 value: 58.72244763127063 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ro) config: ro split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.04102219233354 - type: f1 value: 55.67040186148946 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ru) config: ru split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.01613987895091 - type: f1 value: 57.203949825484855 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sl) config: sl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.35843981170141 - type: f1 value: 54.18656338999773 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sq) config: sq split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.47948890383322 - type: f1 value: 54.772224557130954 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sv) config: sv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.43981170141224 - type: f1 value: 56.09260971364242 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (sw) config: sw split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 33.9609952925353 - type: f1 value: 33.18853392353405 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ta) config: ta split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 44.29388029589778 - type: f1 value: 41.51986533284474 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (te) config: te split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 47.13517148621385 - type: f1 value: 43.94784138379624 - 
task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (th) config: th split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.856086079354405 - type: f1 value: 56.618177384748456 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (tl) config: tl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 35.35978480161398 - type: f1 value: 34.060680080365046 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (tr) config: tr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.630127774041696 - type: f1 value: 57.46288652988266 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (ur) config: ur split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 52.7908540685945 - type: f1 value: 51.46934239116157 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (vi) config: vi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.6469401479489 - type: f1 value: 53.9903066185816 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (zh-CN) config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.85743106926698 - type: f1 value: 59.31579548450755 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (zh-TW) config: zh-TW split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.46805648957633 - type: f1 value: 57.48469733657326 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (af) config: af split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 50.86415601882985 - type: f1 value: 49.41696672602645 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (am) config: am split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 41.183591123066584 - type: f1 value: 40.04563865770774 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ar) config: ar split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 50.08069939475455 - type: f1 value: 50.724800165846126 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (az) config: az split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 51.287827841291204 - type: f1 value: 50.72873776739851 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (bn) config: bn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 46.53328850033624 - type: f1 value: 45.93317866639667 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (cy) config: cy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - 
type: accuracy value: 34.347679892400805 - type: f1 value: 31.941581141280828 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (da) config: da split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.073301950235376 - type: f1 value: 62.228728940111054 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (de) config: de split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.398789509078675 - type: f1 value: 54.80778341609032 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (el) config: el split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.79892400806993 - type: f1 value: 60.69430756982446 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.96368527236046 - type: f1 value: 66.5893927997656 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (es) config: es split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.21250840618695 - type: f1 value: 62.347177794128925 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fa) config: fa split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.43779421654339 - type: f1 value: 61.307701312085605 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fi) config: fi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.09952925353059 - type: f1 value: 60.313907927386914 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fr) config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.38601210490922 - type: f1 value: 63.05968938353488 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (he) config: he split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.2878278412912 - type: f1 value: 55.92927644838597 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (hi) config: hi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.62878278412912 - type: f1 value: 60.25299253652635 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (hu) config: hu split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.28850033624748 - type: f1 value: 62.77053246337031 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (hy) config: hy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 54.875588433086754 - type: f1 value: 54.30717357279134 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB 
MassiveScenarioClassification (id) config: id split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.99394754539341 - type: f1 value: 61.73085530883037 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (is) config: is split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 38.581035642232685 - type: f1 value: 36.96287269695893 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (it) config: it split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.350369872225976 - type: f1 value: 61.807327324823966 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ja) config: ja split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.17148621385338 - type: f1 value: 65.29620144656751 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (jv) config: jv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 36.12642905178212 - type: f1 value: 35.334393048479484 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ka) config: ka split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 50.26899798251513 - type: f1 value: 49.041065960139434 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (km) config: km split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 44.24344317417619 - type: f1 value: 42.42177854872125 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (kn) config: kn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 47.370544720914594 - type: f1 value: 46.589722581465324 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ko) config: ko split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 58.89038332212508 - type: f1 value: 57.753607921990394 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (lv) config: lv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.506388702084756 - type: f1 value: 56.0485860423295 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ml) config: ml split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 50.06388702084734 - type: f1 value: 50.109364641824584 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (mn) config: mn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 55.053799596503026 - type: f1 value: 54.490665705666686 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ms) config: ms split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.77135171486213 - type: f1 value: 
58.2808650158803 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (my) config: my split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 55.71620712844654 - type: f1 value: 53.863034882475304 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (nb) config: nb split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.26227303295225 - type: f1 value: 59.86604657147016 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (nl) config: nl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.3759246805649 - type: f1 value: 62.45257339288533 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (pl) config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.552118359112306 - type: f1 value: 61.354449605776765 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (pt) config: pt split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.40753194351043 - type: f1 value: 61.98779889528889 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ro) config: ro split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.68258238063214 - type: f1 value: 60.59973978976571 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ru) config: ru split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.31002017484868 - type: f1 value: 62.412312268503655 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sl) config: sl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.429051782111635 - type: f1 value: 61.60095590401424 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sq) config: sq split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.229320780094156 - type: f1 value: 61.02251426747547 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sv) config: sv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.42501681237391 - type: f1 value: 63.461494430605235 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (sw) config: sw split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 38.51714862138534 - type: f1 value: 37.12466722986362 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ta) config: ta split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 46.99731002017485 - type: f1 value: 45.859147049984834 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (te) config: te split: test revision: 
7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 51.01882985877605 - type: f1 value: 49.01040173136056 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (th) config: th split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.234700739744454 - type: f1 value: 62.732294595214746 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (tl) config: tl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 38.72225958305312 - type: f1 value: 36.603231928120906 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (tr) config: tr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.48554135843982 - type: f1 value: 63.97380562022752 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (ur) config: ur split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 56.7955615332885 - type: f1 value: 55.95308241204802 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (vi) config: vi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 57.06455951580362 - type: f1 value: 56.95570494066693 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (zh-CN) config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.8338937457969 - type: f1 value: 65.6778746906008 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (zh-TW) config: zh-TW split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.369199731002034 - type: f1 value: 63.527650116059945 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 29.442504112215538 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 26.16062814161053 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 65.319 - type: map_at_10 value: 78.72 - type: map_at_100 value: 79.44600000000001 - type: map_at_1000 value: 79.469 - type: map_at_3 value: 75.693 - type: map_at_5 value: 77.537 - type: mrr_at_1 value: 75.24 - type: mrr_at_10 value: 82.304 - type: mrr_at_100 value: 82.485 - type: mrr_at_1000 value: 82.489 - type: mrr_at_3 value: 81.002 - type: mrr_at_5 value: 81.817 - type: ndcg_at_1 value: 75.26 - type: ndcg_at_10 value: 83.07 - type: ndcg_at_100 value: 84.829 - type: ndcg_at_1000 value: 85.087 - type: ndcg_at_3 value: 79.67699999999999 - type: ndcg_at_5 value: 81.42 - type: precision_at_1 value: 75.26 - type: precision_at_10 value: 12.697 - type: precision_at_100 value: 1.4829999999999999 - type: precision_at_1000 value: 0.154 - type: precision_at_3 value: 34.849999999999994 - type: precision_at_5 value: 23.054 - type: 
recall_at_1 value: 65.319 - type: recall_at_10 value: 91.551 - type: recall_at_100 value: 98.053 - type: recall_at_1000 value: 99.516 - type: recall_at_3 value: 81.819 - type: recall_at_5 value: 86.66199999999999 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 31.249791587189996 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 43.302922383029816 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.80670811345861 - type: cos_sim_spearman value: 79.97373018384307 - type: euclidean_pearson value: 83.40205934125837 - type: euclidean_spearman value: 79.73331008251854 - type: manhattan_pearson value: 83.3320983393412 - type: manhattan_spearman value: 79.677919746045 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 86.3816087627948 - type: cos_sim_spearman value: 80.91314664846955 - type: euclidean_pearson value: 85.10603071031096 - type: euclidean_spearman value: 79.42663939501841 - type: manhattan_pearson value: 85.16096376014066 - type: manhattan_spearman value: 79.51936545543191 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 80.44665329940209 - type: cos_sim_spearman value: 82.86479010707745 - type: euclidean_pearson value: 84.06719627734672 - type: euclidean_spearman value: 84.9356099976297 - type: manhattan_pearson value: 84.10370009572624 - type: manhattan_spearman value: 84.96828040546536 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 86.05704260568437 - type: cos_sim_spearman value: 87.36399473803172 - type: euclidean_pearson value: 86.8895170159388 - type: euclidean_spearman value: 87.16246440866921 - type: manhattan_pearson value: 86.80814774538997 - type: manhattan_spearman value: 87.09320142699522 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 85.97825118945852 - type: cos_sim_spearman value: 88.31438033558268 - type: euclidean_pearson value: 87.05174694758092 - type: euclidean_spearman value: 87.80659468392355 - type: manhattan_pearson value: 86.98831322198717 - type: manhattan_spearman value: 87.72820615049285 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 78.68745420126719 - type: cos_sim_spearman value: 81.6058424699445 - type: euclidean_pearson value: 81.16540133861879 - type: euclidean_spearman value: 81.86377535458067 - type: manhattan_pearson value: 81.13813317937021 - type: manhattan_spearman value: 81.87079962857256 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (ko-ko) config: ko-ko split: test revision: 
af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 68.06192660936868 - type: cos_sim_spearman value: 68.2376353514075 - type: euclidean_pearson value: 60.68326946956215 - type: euclidean_spearman value: 59.19352349785952 - type: manhattan_pearson value: 60.6592944683418 - type: manhattan_spearman value: 59.167534419270865 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (ar-ar) config: ar-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 76.78098264855684 - type: cos_sim_spearman value: 78.02670452969812 - type: euclidean_pearson value: 77.26694463661255 - type: euclidean_spearman value: 77.47007626009587 - type: manhattan_pearson value: 77.25070088632027 - type: manhattan_spearman value: 77.36368265830724 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-ar) config: en-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 78.45418506379532 - type: cos_sim_spearman value: 78.60412019902428 - type: euclidean_pearson value: 79.90303710850512 - type: euclidean_spearman value: 78.67123625004957 - type: manhattan_pearson value: 80.09189580897753 - type: manhattan_spearman value: 79.02484481441483 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-de) config: en-de split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 82.35556731232779 - type: cos_sim_spearman value: 81.48249735354844 - type: euclidean_pearson value: 81.66748026636621 - type: euclidean_spearman value: 80.35571574338547 - type: manhattan_pearson value: 81.38214732806365 - type: manhattan_spearman value: 79.9018202958774 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 86.4527703176897 - type: cos_sim_spearman value: 85.81084095829584 - type: euclidean_pearson value: 86.43489162324457 - type: euclidean_spearman value: 85.27110976093296 - type: manhattan_pearson value: 86.43674259444512 - type: manhattan_spearman value: 85.05719308026032 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-tr) config: en-tr split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 76.00411240034492 - type: cos_sim_spearman value: 76.33887356560854 - type: euclidean_pearson value: 76.81730660019446 - type: euclidean_spearman value: 75.04432185451306 - type: manhattan_pearson value: 77.22298813168995 - type: manhattan_spearman value: 75.56420330256725 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (es-en) config: es-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.1447136836213 - type: cos_sim_spearman value: 81.80823850788917 - type: euclidean_pearson value: 80.84505734814422 - type: euclidean_spearman value: 81.714168092736 - type: manhattan_pearson value: 80.84713816174187 - type: manhattan_spearman value: 81.61267814749516 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (es-es) config: es-es split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.01257457052873 - type: cos_sim_spearman value: 87.91146458004216 - type: euclidean_pearson value: 88.36771859717994 - type: 
euclidean_spearman value: 87.73182474597515 - type: manhattan_pearson value: 88.26551451003671 - type: manhattan_spearman value: 87.71675151388992 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (fr-en) config: fr-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.20121618382373 - type: cos_sim_spearman value: 78.05794691968603 - type: euclidean_pearson value: 79.93819925682054 - type: euclidean_spearman value: 78.00586118701553 - type: manhattan_pearson value: 80.05598625820885 - type: manhattan_spearman value: 78.04802948866832 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (it-en) config: it-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 81.51743373871778 - type: cos_sim_spearman value: 80.98266651818703 - type: euclidean_pearson value: 81.11875722505269 - type: euclidean_spearman value: 79.45188413284538 - type: manhattan_pearson value: 80.7988457619225 - type: manhattan_spearman value: 79.49643569311485 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (nl-en) config: nl-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 81.78679924046351 - type: cos_sim_spearman value: 80.9986574147117 - type: euclidean_pearson value: 82.09130079135713 - type: euclidean_spearman value: 80.66215667390159 - type: manhattan_pearson value: 82.0328610549654 - type: manhattan_spearman value: 80.31047226932408 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.08082172994642 - type: cos_sim_spearman value: 62.9940530222459 - type: euclidean_pearson value: 58.47927303460365 - type: euclidean_spearman value: 60.8440317609258 - type: manhattan_pearson value: 58.32438211697841 - type: manhattan_spearman value: 60.69642636776064 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de) config: de split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 33.83985707464123 - type: cos_sim_spearman value: 46.89093209603036 - type: euclidean_pearson value: 34.63602187576556 - type: euclidean_spearman value: 46.31087228200712 - type: manhattan_pearson value: 34.66899391543166 - type: manhattan_spearman value: 46.33049538425276 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (es) config: es split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 51.61315965767736 - type: cos_sim_spearman value: 58.9434266730386 - type: euclidean_pearson value: 50.35885602217862 - type: euclidean_spearman value: 58.238679883286025 - type: manhattan_pearson value: 53.01732044381151 - type: manhattan_spearman value: 58.10482351761412 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (pl) config: pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 26.771738440430177 - type: cos_sim_spearman value: 34.807259227816054 - type: euclidean_pearson value: 17.82657835823811 - type: euclidean_spearman value: 34.27912898498941 - type: manhattan_pearson value: 19.121527758886312 - type: manhattan_spearman value: 34.4940050226265 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (tr) config: tr 
split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 52.8354704676683 - type: cos_sim_spearman value: 57.28629534815841 - type: euclidean_pearson value: 54.10329332004385 - type: euclidean_spearman value: 58.15030615859976 - type: manhattan_pearson value: 55.42372087433115 - type: manhattan_spearman value: 57.52270736584036 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (ar) config: ar split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 31.01976557986924 - type: cos_sim_spearman value: 54.506959483927616 - type: euclidean_pearson value: 36.917863022119086 - type: euclidean_spearman value: 53.750194241538566 - type: manhattan_pearson value: 37.200177833241085 - type: manhattan_spearman value: 53.507659188082535 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (ru) config: ru split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 46.38635647225934 - type: cos_sim_spearman value: 54.50892732637536 - type: euclidean_pearson value: 40.8331015184763 - type: euclidean_spearman value: 53.142903182230924 - type: manhattan_pearson value: 43.07655692906317 - type: manhattan_spearman value: 53.5833474125901 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh) config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 60.52525456662916 - type: cos_sim_spearman value: 63.23975489531082 - type: euclidean_pearson value: 58.989191722317514 - type: euclidean_spearman value: 62.536326639863894 - type: manhattan_pearson value: 61.32982866201855 - type: manhattan_spearman value: 63.068262822520516 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (fr) config: fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 59.63798684577696 - type: cos_sim_spearman value: 74.09937723367189 - type: euclidean_pearson value: 63.77494904383906 - type: euclidean_spearman value: 71.15932571292481 - type: manhattan_pearson value: 63.69646122775205 - type: manhattan_spearman value: 70.54960698541632 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de-en) config: de-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 36.50262468726711 - type: cos_sim_spearman value: 45.00322499674274 - type: euclidean_pearson value: 32.58759216581778 - type: euclidean_spearman value: 40.13720951315429 - type: manhattan_pearson value: 34.88422299605277 - type: manhattan_spearman value: 40.63516862200963 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (es-en) config: es-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 56.498552617040275 - type: cos_sim_spearman value: 67.71358426124443 - type: euclidean_pearson value: 57.16474781778287 - type: euclidean_spearman value: 65.721515493531 - type: manhattan_pearson value: 59.25227610738926 - type: manhattan_spearman value: 65.89743680340739 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (it) config: it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 55.97978814727984 - type: cos_sim_spearman value: 65.85821395092104 - type: euclidean_pearson value: 59.11117270978519 - type: 
euclidean_spearman value: 64.50062069934965 - type: manhattan_pearson value: 59.4436213778161 - type: manhattan_spearman value: 64.4003273074382 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (pl-en) config: pl-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.00873192515712 - type: cos_sim_spearman value: 60.167708809138745 - type: euclidean_pearson value: 56.91950637760252 - type: euclidean_spearman value: 58.50593399441014 - type: manhattan_pearson value: 58.683747352584994 - type: manhattan_spearman value: 59.38110066799761 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh-en) config: zh-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 54.26020658151187 - type: cos_sim_spearman value: 61.29236187204147 - type: euclidean_pearson value: 55.993896804147056 - type: euclidean_spearman value: 58.654928232615354 - type: manhattan_pearson value: 56.612492816099426 - type: manhattan_spearman value: 58.65144067094258 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (es-it) config: es-it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 49.13817835368122 - type: cos_sim_spearman value: 50.78524216975442 - type: euclidean_pearson value: 46.56046454501862 - type: euclidean_spearman value: 50.3935060082369 - type: manhattan_pearson value: 48.0232348418531 - type: manhattan_spearman value: 50.79528358464199 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de-fr) config: de-fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 44.274388638585286 - type: cos_sim_spearman value: 49.43124017389838 - type: euclidean_pearson value: 42.45909582681174 - type: euclidean_spearman value: 49.661383797129055 - type: manhattan_pearson value: 42.5771970142383 - type: manhattan_spearman value: 50.14423414390715 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (de-pl) config: de-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 26.119500839749776 - type: cos_sim_spearman value: 39.324070169024424 - type: euclidean_pearson value: 35.83247077201831 - type: euclidean_spearman value: 42.61903924348457 - type: manhattan_pearson value: 35.50415034487894 - type: manhattan_spearman value: 41.87998075949351 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (fr-pl) config: fr-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 72.62575835691209 - type: cos_sim_spearman value: 73.24670207647144 - type: euclidean_pearson value: 78.07793323914657 - type: euclidean_spearman value: 73.24670207647144 - type: manhattan_pearson value: 77.51429306378206 - type: manhattan_spearman value: 73.24670207647144 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.09375596849891 - type: cos_sim_spearman value: 86.44881302053585 - type: euclidean_pearson value: 84.71259163967213 - type: euclidean_spearman value: 85.63661992344069 - type: manhattan_pearson value: 84.64466537502614 - type: manhattan_spearman value: 85.53769949940238 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: 
MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 70.2056154684549 - type: mrr value: 89.52703161036494 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.57623762376238 - type: cos_sim_ap value: 83.53051588811371 - type: cos_sim_f1 value: 77.72704211060375 - type: cos_sim_precision value: 78.88774459320288 - type: cos_sim_recall value: 76.6 - type: dot_accuracy value: 99.06435643564356 - type: dot_ap value: 27.003124923857463 - type: dot_f1 value: 34.125269978401725 - type: dot_precision value: 37.08920187793427 - type: dot_recall value: 31.6 - type: euclidean_accuracy value: 99.61485148514852 - type: euclidean_ap value: 85.47332647001774 - type: euclidean_f1 value: 80.0808897876643 - type: euclidean_precision value: 80.98159509202453 - type: euclidean_recall value: 79.2 - type: manhattan_accuracy value: 99.61683168316831 - type: manhattan_ap value: 85.41969859598552 - type: manhattan_f1 value: 79.77755308392315 - type: manhattan_precision value: 80.67484662576688 - type: manhattan_recall value: 78.9 - type: max_accuracy value: 99.61683168316831 - type: max_ap value: 85.47332647001774 - type: max_f1 value: 80.0808897876643 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 34.35688940053467 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 30.64427069276576 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 44.89500754900078 - type: mrr value: 45.33215558950853 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.653069624224084 - type: cos_sim_spearman value: 30.10187112430319 - type: dot_pearson value: 28.966278202103666 - type: dot_spearman value: 28.342234095507767 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 65.96839999999999 - type: ap value: 11.846327590186444 - type: f1 value: 50.518102944693574 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 55.220713073005086 - type: f1 value: 55.47856175692088 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 31.581473892235877 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 
70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 82.94093103653812 - type: cos_sim_ap value: 62.48963249213361 - type: cos_sim_f1 value: 58.9541137429912 - type: cos_sim_precision value: 52.05091937765205 - type: cos_sim_recall value: 67.96833773087072 - type: dot_accuracy value: 78.24998509864696 - type: dot_ap value: 40.82371294480071 - type: dot_f1 value: 44.711163153786096 - type: dot_precision value: 35.475379374419326 - type: dot_recall value: 60.4485488126649 - type: euclidean_accuracy value: 83.13166835548668 - type: euclidean_ap value: 63.459878609769774 - type: euclidean_f1 value: 60.337199569532466 - type: euclidean_precision value: 55.171659741963694 - type: euclidean_recall value: 66.56992084432719 - type: manhattan_accuracy value: 83.00649698992669 - type: manhattan_ap value: 63.263161177904905 - type: manhattan_f1 value: 60.17122874713614 - type: manhattan_precision value: 55.40750610703975 - type: manhattan_recall value: 65.8311345646438 - type: max_accuracy value: 83.13166835548668 - type: max_ap value: 63.459878609769774 - type: max_f1 value: 60.337199569532466 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 87.80416812201653 - type: cos_sim_ap value: 83.45540469219863 - type: cos_sim_f1 value: 75.58836427422892 - type: cos_sim_precision value: 71.93934335002783 - type: cos_sim_recall value: 79.62734832152756 - type: dot_accuracy value: 83.04226336011176 - type: dot_ap value: 70.63007268018524 - type: dot_f1 value: 65.35980325765405 - type: dot_precision value: 60.84677151768532 - type: dot_recall value: 70.59593470896212 - type: euclidean_accuracy value: 87.60430007373773 - type: euclidean_ap value: 83.10068502536592 - type: euclidean_f1 value: 75.02510506936439 - type: euclidean_precision value: 72.56637168141593 - type: euclidean_recall value: 77.65629812134279 - type: manhattan_accuracy value: 87.60041914076145 - type: manhattan_ap value: 83.05480769911229 - type: manhattan_f1 value: 74.98522895125554 - type: manhattan_precision value: 72.04797047970479 - type: manhattan_recall value: 78.17215891592238 - type: max_accuracy value: 87.80416812201653 - type: max_ap value: 83.45540469219863 - type: max_f1 value: 75.58836427422892 --- # shibing624/text2vec-base-multilingual This is a CoSENT(Cosine Sentence) model: shibing624/text2vec-base-multilingual. It maps sentences to a 384 dimensional dense vector space and can be used for tasks like sentence embeddings, text matching or semantic search. 
- training dataset: https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-multilingual-dataset
- base model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
- max_seq_length: 256
- best epoch: 4
- sentence embedding dim: 384

## Evaluation

For an automated evaluation of this model, see the *Evaluation Benchmark*: [text2vec](https://github.com/shibing624/text2vec)

## Languages

Available languages are: de, en, es, fr, it, nl, pl, pt, ru, zh

### Release Models

- Chinese text-matching evaluation results of the models released in this project:

| Arch | BaseModel | Model | ATEC | BQ | LCQMC | PAWSX | STS-B | SOHU-dd | SOHU-dc | Avg | QPS |
|:-----------|:--------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------|:-----:|:-----:|:-----:|:-----:|:-----:|:-------:|:-------:|:---------:|:-----:|
| Word2Vec   | word2vec                                                       | [w2v-light-tencent-chinese](https://ai.tencent.com/ailab/nlp/en/download.html)                                                                       | 20.00 | 31.49 | 59.46 | 2.57  | 55.78 | 55.04   | 20.70   | 35.03     | 23769 |
| SBERT      | xlm-roberta-base                                               | [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)   | 18.42 | 38.52 | 63.96 | 10.14 | 78.90 | 63.01   | 52.28   | 46.46     | 3138  |
| Instructor | hfl/chinese-roberta-wwm-ext                                    | [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base)                                                                                          | 41.27 | 63.81 | 74.87 | 12.20 | 76.96 | 75.83   | 60.55   | 57.93     | 2980  |
| CoSENT     | hfl/chinese-macbert-base                                       | [shibing624/text2vec-base-chinese](https://huggingface.co/shibing624/text2vec-base-chinese)                                                         | 31.93 | 42.67 | 70.16 | 17.21 | 79.30 | 70.27   | 50.42   | 51.61     | 3008  |
| CoSENT     | hfl/chinese-lert-large                                         | [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese)                                                     | 32.61 | 44.59 | 69.30 | 14.51 | 79.44 | 73.01   | 59.04   | 53.12     | 2092  |
| CoSENT     | nghuyong/ernie-3.0-base-zh                                     | [shibing624/text2vec-base-chinese-sentence](https://huggingface.co/shibing624/text2vec-base-chinese-sentence)                                       | 43.37 | 61.43 | 73.48 | 38.90 | 78.25 | 70.60   | 53.08   | 59.87     | 3089  |
| CoSENT     | nghuyong/ernie-3.0-base-zh                                     | [shibing624/text2vec-base-chinese-paraphrase](https://huggingface.co/shibing624/text2vec-base-chinese-paraphrase)                                   | 44.89 | 63.58 | 74.24 | 40.90 | 78.93 | 76.70   | 63.30   | **63.08** | 3066  |
| CoSENT     | sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2    | [shibing624/text2vec-base-multilingual](https://huggingface.co/shibing624/text2vec-base-multilingual)                                               | 32.39 | 50.33 | 65.64 | 32.56 | 74.45 | 68.88   | 51.17   | 53.67     | 4004  |

Notes:
- Evaluation metric: Spearman correlation coefficient.
- The `shibing624/text2vec-base-chinese` model was trained with the CoSENT method on the Chinese STS-B data, based on `hfl/chinese-macbert-base`, and achieves good results on the Chinese STS-B test set. It can be reproduced by running [examples/training_sup_text_matching_model.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model.py); the model files have been uploaded to the HF model hub. Recommended for general-purpose Chinese semantic matching tasks.
- The `shibing624/text2vec-base-chinese-sentence` model was trained with the CoSENT method on the hand-curated Chinese STS dataset [shibing624/nli-zh-all/text2vec-base-chinese-sentence-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-chinese-sentence-dataset), based on `nghuyong/ernie-3.0-base-zh`, and achieves good results on various Chinese NLI test sets. It can be reproduced by running [examples/training_sup_text_matching_model_jsonl_data.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model_jsonl_data.py); the model files have been uploaded to the HF model hub. Recommended for Chinese s2s (sentence vs. sentence) semantic matching tasks.
- The `shibing624/text2vec-base-chinese-paraphrase` model was trained with the CoSENT method on the hand-curated Chinese STS dataset [shibing624/nli-zh-all/text2vec-base-chinese-paraphrase-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-chinese-paraphrase-dataset), based on `nghuyong/ernie-3.0-base-zh`. Relative to [shibing624/nli-zh-all/text2vec-base-chinese-sentence-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-chinese-sentence-dataset), this dataset adds s2p (sentence to paraphrase) data, strengthening the model's long-text representation ability; it reaches SOTA on various Chinese NLI test sets. It can be reproduced by running the same [examples/training_sup_text_matching_model_jsonl_data.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model_jsonl_data.py) script; the model files have been uploaded to the HF model hub. Recommended for Chinese s2p (sentence vs. paragraph) semantic matching tasks.
- The `shibing624/text2vec-base-multilingual` model was trained with the CoSENT method on the hand-curated multilingual STS dataset [shibing624/nli-zh-all/text2vec-base-multilingual-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-multilingual-dataset), based on `sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2`, and improves over the base model on Chinese and English test sets. It can be reproduced by running the same [examples/training_sup_text_matching_model_jsonl_data.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model_jsonl_data.py) script; the model files have been uploaded to the HF model hub. Recommended for multilingual semantic matching tasks.
- `w2v-light-tencent-chinese` is a Word2Vec model built from Tencent word vectors; it loads on CPU and is suited to literal Chinese text-matching tasks and cold-start scenarios with little data.
- The GPU environment for the QPS tests was a Tesla V100 with 32GB of memory.

Training experiment report: [model training report](https://github.com/shibing624/text2vec/blob/master/docs/model_report.md)

## Usage (text2vec)

Using this model becomes easy when you have [text2vec](https://github.com/shibing624/text2vec) installed:

```
pip install -U text2vec
```

Then you can use the model like this:

```python
from text2vec import SentenceModel

sentences = ['如何更换花呗绑定银行卡', 'How to replace the Huabei bundled bank card']

model = SentenceModel('shibing624/text2vec-base-multilingual')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [text2vec](https://github.com/shibing624/text2vec), you can use the model like this: first, you pass your input through the transformer model, then you apply the right pooling operation on top of the contextualized word embeddings.

Install transformers:

```
pip install transformers
```

Then load model and predict:

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('shibing624/text2vec-base-multilingual')
model = AutoModel.from_pretrained('shibing624/text2vec-base-multilingual')
sentences = ['如何更换花呗绑定银行卡', 'How to replace the Huabei bundled bank card']

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```

## Usage (sentence-transformers)

[sentence-transformers](https://github.com/UKPLab/sentence-transformers) is a popular library to compute dense vector representations for sentences.

Install sentence-transformers:

```
pip install -U sentence-transformers
```

Then load model and predict:

```python
from sentence_transformers import SentenceTransformer

m = SentenceTransformer("shibing624/text2vec-base-multilingual")
sentences = ['如何更换花呗绑定银行卡', 'How to replace the Huabei bundled bank card']

sentence_embeddings = m.encode(sentences)
print("Sentence embeddings:")
print(sentence_embeddings)
```

## Full Model Architecture

```
CoSENT(
  (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_mean_tokens': True})
)
```

## Intended uses

Our model is intended to be used as a sentence and short paragraph encoder. Given an input text, it outputs a vector which captures the semantic information. The sentence vector may be used for information retrieval, clustering or sentence similarity tasks.

By default, input text longer than 256 word pieces is truncated.

## Training procedure

### Pre-training

We use the pretrained [`sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2`](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) model. Please refer to the model card for more detailed information about the pre-training procedure.

### Fine-tuning

We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity for each sentence pair in the batch, then apply a rank loss that compares the similarities of true pairs against those of false pairs (a minimal sketch of this rank objective appears at the end of this card).

## Citing & Authors

This model was trained by [text2vec](https://github.com/shibing624/text2vec).

If you find this model helpful, feel free to cite:

```bibtex
@software{text2vec,
  author = {Ming Xu},
  title = {text2vec: A Tool for Text to Vector},
  year = {2023},
  url = {https://github.com/shibing624/text2vec},
}
```
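As referenced in the fine-tuning section above, here is a minimal PyTorch sketch of a CoSENT-style rank loss, illustrating how pairs with higher similarity labels are pushed above pairs with lower labels. This is an illustration only: the function name `cosent_loss` and the scale factor of 20 are our assumptions in the spirit of the CoSENT method, not the exact training code from the [text2vec](https://github.com/shibing624/text2vec) repository.

```python
import torch
import torch.nn.functional as F


def cosent_loss(u: torch.Tensor, v: torch.Tensor, labels: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    """CoSENT-style rank loss sketch.

    u, v:   (batch, dim) embeddings for the two sides of each sentence pair.
    labels: (batch,) similarity labels; a higher label means the pair
            should receive a higher cosine similarity.
    """
    s = F.cosine_similarity(u, v, dim=-1) * scale     # scaled per-pair similarities
    diff = s.unsqueeze(0) - s.unsqueeze(1)            # diff[i, j] = s[j] - s[i]
    mask = labels.unsqueeze(1) > labels.unsqueeze(0)  # mask[i, j]: pair i should outrank pair j
    # loss = log(1 + sum over ranked pairs of exp(s_j - s_i))
    zero = torch.zeros(1, device=s.device)
    return torch.logsumexp(torch.cat([zero, diff[mask]]), dim=0)
```

Minimizing this log-sum-exp term penalizes every case where a lower-labeled pair scores a higher cosine similarity than a higher-labeled pair, which matches the "true pairs vs. false pairs" comparison described above.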
THUDM/chatglm2-6b
THUDM
"2023-10-09T08:19:27Z"
107,624
1,992
transformers
[ "transformers", "pytorch", "chatglm", "glm", "thudm", "custom_code", "zh", "en", "arxiv:2103.10360", "arxiv:2210.02414", "arxiv:1911.02150", "endpoints_compatible", "has_space", "region:us" ]
null
"2023-06-24T16:26:27Z"
--- language: - zh - en tags: - glm - chatglm - thudm --- # ChatGLM2-6B <p align="center"> 💻 <a href="https://github.com/THUDM/ChatGLM2-6B" target="_blank">Github Repo</a> • 🐦 <a href="https://twitter.com/thukeg" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/abs/2103.10360" target="_blank">[GLM@ACL 22]</a> <a href="https://github.com/THUDM/GLM" target="_blank">[GitHub]</a> • 📃 <a href="https://arxiv.org/abs/2210.02414" target="_blank">[GLM-130B@ICLR 23]</a> <a href="https://github.com/THUDM/GLM-130B" target="_blank">[GitHub]</a> <br> </p> <p align="center"> 👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1y7pqoloy-9b1g6T6JjA8J0KxvUjbwJw" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md" target="_blank">WeChat</a> </p> <p align="center"> 📍Experience the larger-scale ChatGLM model at <a href="https://www.chatglm.cn">chatglm.cn</a> </p> ## 介绍 ChatGLM**2**-6B 是开源中英双语对话模型 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) 的第二代版本,在保留了初代模型对话流畅、部署门槛较低等众多优秀特性的基础之上,ChatGLM**2**-6B 引入了如下新特性: 1. **更强大的性能**:基于 ChatGLM 初代模型的开发经验,我们全面升级了 ChatGLM2-6B 的基座模型。ChatGLM2-6B 使用了 [GLM](https://github.com/THUDM/GLM) 的混合目标函数,经过了 1.4T 中英标识符的预训练与人类偏好对齐训练,[评测结果](#评测结果)显示,相比于初代模型,ChatGLM2-6B 在 MMLU(+23%)、CEval(+33%)、GSM8K(+571%) 、BBH(+60%)等数据集上的性能取得了大幅度的提升,在同尺寸开源模型中具有较强的竞争力。 2. **更长的上下文**:基于 [FlashAttention](https://github.com/HazyResearch/flash-attention) 技术,我们将基座模型的上下文长度(Context Length)由 ChatGLM-6B 的 2K 扩展到了 32K,并在对话阶段使用 8K 的上下文长度训练,允许更多轮次的对话。但当前版本的 ChatGLM2-6B 对单轮超长文档的理解能力有限,我们会在后续迭代升级中着重进行优化。 3. **更高效的推理**:基于 [Multi-Query Attention](http://arxiv.org/abs/1911.02150) 技术,ChatGLM2-6B 有更高效的推理速度和更低的显存占用:在官方的模型实现下,推理速度相比初代提升了 42%,INT4 量化下,6G 显存支持的对话长度由 1K 提升到了 8K。 4. **更开放的协议**:ChatGLM2-6B 权重对学术研究**完全开放**,在填写[问卷](https://open.bigmodel.cn/mla/form)进行登记后**亦允许免费商业使用**。 ChatGLM**2**-6B is the second-generation version of the open-source bilingual (Chinese-English) chat model [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B). It retains the smooth conversation flow and low deployment threshold of the first-generation model, while introducing the following new features: 1. **Stronger Performance**: Based on the development experience of the first-generation ChatGLM model, we have fully upgraded the base model of ChatGLM2-6B. ChatGLM2-6B uses the hybrid objective function of [GLM](https://github.com/THUDM/GLM), and has undergone pre-training with 1.4T bilingual tokens and human preference alignment training. The [evaluation results](README.md#evaluation-results) show that, compared to the first-generation model, ChatGLM2-6B has achieved substantial improvements in performance on datasets like MMLU (+23%), CEval (+33%), GSM8K (+571%), BBH (+60%), showing strong competitiveness among models of the same size. 2. **Longer Context**: Based on [FlashAttention](https://github.com/HazyResearch/flash-attention) technique, we have extended the context length of the base model from 2K in ChatGLM-6B to 32K, and trained with a context length of 8K during the dialogue alignment, allowing for more rounds of dialogue. However, the current version of ChatGLM2-6B has limited understanding of single-round ultra-long documents, which we will focus on optimizing in future iterations. 3. 
**More Efficient Inference**: Based on the [Multi-Query Attention](http://arxiv.org/abs/1911.02150) technique, ChatGLM2-6B has more efficient inference speed and lower GPU memory usage: under the official implementation, the inference speed has increased by 42% compared to the first generation; under INT4 quantization, the dialogue length supported by 6G of GPU memory has increased from 1K to 8K (a hedged quantization sketch appears at the end of this card).
4. **More Open License**: ChatGLM2-6B weights are **completely open** for academic research, and **free commercial use** is also allowed after completing the [questionnaire](https://open.bigmodel.cn/mla/form).

## 软件依赖 (Dependencies)

```shell
pip install protobuf transformers==4.30.2 cpm_kernels torch>=2.0 gradio mdtex2html sentencepiece accelerate
```

## 代码调用 (Code Usage)

可以通过如下代码调用 ChatGLM2-6B 模型来生成对话:

You can generate dialogue with the ChatGLM2-6B model using the following code:

```ipython
>>> from transformers import AutoTokenizer, AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
>>> model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
>>> model = model.eval()
>>> response, history = model.chat(tokenizer, "你好", history=[])
>>> print(response)
你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。
>>> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history)
>>> print(response)
晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:

1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。
2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。
3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。
4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。
5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。
6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。

如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。
```

关于更多的使用说明,包括如何运行命令行和网页版本的 DEMO,以及使用模型量化以节省显存,请参考我们的 [Github Repo](https://github.com/THUDM/ChatGLM2-6B)。

For more instructions, including how to run CLI and web demos, and model quantization, please refer to our [Github Repo](https://github.com/THUDM/ChatGLM2-6B).

## Change Log

* v1.0

## 协议 (License)

本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM2-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。

The code in this repository is open-sourced under the [Apache-2.0](LICENSE) license, while use of the ChatGLM2-6B model weights must follow the [Model License](MODEL_LICENSE).

## 引用 (Citation)

如果你觉得我们的工作有帮助的话,请考虑引用下列论文,ChatGLM2-6B 的论文会在近期公布,敬请期待~

If you find our work helpful, please consider citing the following papers. The ChatGLM2-6B paper will be released soon, so stay tuned!

```
@article{zeng2022glm,
  title={Glm-130b: An open bilingual pre-trained model},
  author={Zeng, Aohan and Liu, Xiao and Du, Zhengxiao and Wang, Zihan and Lai, Hanyu and Ding, Ming and Yang, Zhuoyi and Xu, Yifan and Zheng, Wendi and Xia, Xiao and others},
  journal={arXiv preprint arXiv:2210.02414},
  year={2022}
}
```

```
@inproceedings{du2022glm,
  title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling},
  author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie},
  booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  pages={320--335},
  year={2022}
}
```
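As a supplement to the usage example above: the card notes that INT4 quantization lets 6G of GPU memory support 8K-token dialogues. Below is a hedged sketch of loading the quantized model, assuming the `quantize()` helper shipped with the model's custom code (`trust_remote_code`), as shown in the Github Repo; check the repo for the authoritative snippet.

```python
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
# INT4 quantization; `quantize(4)` is assumed to be the helper exposed by the
# model's custom modeling code, per the examples in the GitHub repository.
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(4).cuda()
model = model.eval()

response, history = model.chat(tokenizer, "你好", history=[])
print(response)
```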
google/siglip-so400m-patch14-384
google
"2024-01-19T23:33:22Z"
107,608
33
transformers
[ "transformers", "safetensors", "siglip", "zero-shot-image-classification", "vision", "arxiv:2303.15343", "arxiv:2305.13035", "arxiv:2209.06794", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
zero-shot-image-classification
"2024-01-08T13:38:32Z"
---
license: apache-2.0
tags:
- vision
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
  candidate_labels: playing music, playing sports
  example_title: Cat & Dog
---

# SigLIP (shape-optimized model)

SigLIP model pre-trained on WebLI at resolution 384x384. It was introduced in the paper [Sigmoid Loss for Language Image Pre-Training](https://arxiv.org/abs/2303.15343) by Zhai et al. and first released in [this repository](https://github.com/google-research/big_vision).

This model has the SoViT-400m architecture, which is the shape-optimized version as presented in [Getting ViT in Shape: Scaling Laws for Compute-Optimal Model Design](https://arxiv.org/abs/2305.13035) by Alabdulmohsin et al.

Disclaimer: The team releasing SigLIP did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

SigLIP is [CLIP](https://huggingface.co/docs/transformers/model_doc/clip), a multimodal model, with a better loss function. The sigmoid loss operates solely on image-text pairs and does not require a global view of the pairwise similarities for normalization. This allows further scaling up the batch size, while also performing better at smaller batch sizes (a minimal sketch of this loss appears after the training-data section below).

A TLDR of SigLIP by one of the authors can be found [here](https://twitter.com/giffmana/status/1692641733459267713).

## Intended uses & limitations

You can use the raw model for tasks like zero-shot image classification and image-text retrieval. See the [model hub](https://huggingface.co/models?search=google/siglip) to look for other versions on a task that interests you.

### How to use

Here is how to use this model to perform zero-shot image classification:

```python
from PIL import Image
import requests
from transformers import AutoProcessor, AutoModel
import torch

model = AutoModel.from_pretrained("google/siglip-so400m-patch14-384")
processor = AutoProcessor.from_pretrained("google/siglip-so400m-patch14-384")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

texts = ["a photo of 2 cats", "a photo of 2 dogs"]
inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)  # these are the probabilities
print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
```

Alternatively, one can leverage the pipeline API which abstracts away the complexity for the user:

```python
from transformers import pipeline
from PIL import Image
import requests

# load pipe
image_classifier = pipeline(task="zero-shot-image-classification", model="google/siglip-so400m-patch14-384")

# load image
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

# inference
outputs = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"])
outputs = [{"score": round(output["score"], 4), "label": output["label"]} for output in outputs]
print(outputs)
```

For more code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/siglip.html#).

## Training procedure

### Training data

SigLIP is pre-trained on the WebLI dataset [(Chen et al., 2023)](https://arxiv.org/abs/2209.06794).
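To make the sigmoid loss from the model description above concrete, here is a minimal PyTorch sketch in the spirit of the paper's pseudocode. The function name, the learned scalars `t` and `b`, and the assumption of L2-normalized embeddings are our reading of the paper; this is not the `transformers` implementation.

```python
import torch
import torch.nn.functional as F


def sigmoid_loss(img_emb: torch.Tensor, txt_emb: torch.Tensor, t: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """img_emb, txt_emb: (n, d) L2-normalized embeddings; t, b: learned scalar temperature and bias."""
    n = img_emb.shape[0]
    logits = img_emb @ txt_emb.t() * t + b                   # (n, n) pairwise logits
    labels = 2.0 * torch.eye(n, device=logits.device) - 1.0  # +1 for matched pairs, -1 otherwise
    # Every image-text pair is an independent binary decision:
    # no softmax over the batch, hence no global normalization.
    return -F.logsigmoid(labels * logits).sum() / n
```

Because each pair contributes an independent term, the loss decomposes over chunks of the batch, which is what allows the very large batch sizes discussed in the paper.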
### Preprocessing

Images are resized/rescaled to the same resolution (384x384) and normalized across the RGB channels with mean (0.5, 0.5, 0.5) and standard deviation (0.5, 0.5, 0.5).

Texts are tokenized and padded to the same length (64 tokens).

### Compute

The model was trained on 16 TPU-v4 chips for three days.

## Evaluation results

Evaluation of SigLIP compared to CLIP is shown below (taken from the paper).

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/siglip_table.jpeg" alt="drawing" width="600"/>

### BibTeX entry and citation info

```bibtex
@misc{zhai2023sigmoid,
      title={Sigmoid Loss for Language Image Pre-Training},
      author={Xiaohua Zhai and Basil Mustafa and Alexander Kolesnikov and Lucas Beyer},
      year={2023},
      eprint={2303.15343},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
unsloth/mistral-7b-bnb-4bit
unsloth
"2024-03-22T15:25:16Z"
107,479
19
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "unsloth", "mistral-7b", "bnb", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "4-bit", "region:us" ]
text-generation
"2023-12-25T17:15:38Z"
---
language:
- en
license: apache-2.0
library_name: transformers
tags:
- unsloth
- transformers
- mistral
- mistral-7b
- bnb
---

# Finetune Mistral, Gemma, and Llama 2-5x faster with 70% less memory via Unsloth!

Directly quantized 4bit model with `bitsandbytes`. A minimal loading sketch appears at the end of this card.

We have a Google Colab Tesla T4 notebook for Mistral 7b here: https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/Discord%20button.png" width="200"/>](https://discord.gg/u54VK8m8tk)
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/buy%20me%20a%20coffee%20button.png" width="200"/>](https://ko-fi.com/unsloth)
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)

## ✨ Finetune for Free

All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face.

| Unsloth supports | Free Notebooks | Performance | Memory use |
|-----------------|----------------|-------------|------------|
| **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 58% less |
| **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less |
| **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 43% less |
| **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less |
| **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less |
| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less |
| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less |

- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates.
- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr.
- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster.
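As referenced above: the card links notebooks but includes no inline code, so here is a minimal, hedged sketch of loading this pre-quantized checkpoint with plain `transformers`. It assumes a recent `transformers` with `bitsandbytes` installed and a CUDA GPU; the prompt string is arbitrary and this is not an official Unsloth snippet.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# The repository already stores bitsandbytes 4-bit weights, so no extra
# quantization arguments should be needed (assumption based on the
# "directly quantized 4bit model with bitsandbytes" note above).
tokenizer = AutoTokenizer.from_pretrained("unsloth/mistral-7b-bnb-4bit")
model = AutoModelForCausalLM.from_pretrained("unsloth/mistral-7b-bnb-4bit", device_map="auto")

inputs = tokenizer("Unsloth finetuning works by", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```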
klue/roberta-base
klue
"2023-06-12T12:29:12Z"
107,351
14
transformers
[ "transformers", "pytorch", "safetensors", "roberta", "fill-mask", "korean", "klue", "ko", "arxiv:2105.09680", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: ko
tags:
- korean
- klue
mask_token: "[MASK]"
widget:
- text: 대한민국의 수도는 [MASK] 입니다.
---

# KLUE RoBERTa base

Pretrained RoBERTa model on the Korean language. See [Github](https://github.com/KLUE-benchmark/KLUE) and [Paper](https://arxiv.org/abs/2105.09680) for more details.

## How to use

_NOTE:_ Use `BertTokenizer` instead of `RobertaTokenizer` (`AutoTokenizer` will load `BertTokenizer`).

```python
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("klue/roberta-base")
tokenizer = AutoTokenizer.from_pretrained("klue/roberta-base")
```

A short fill-mask example follows at the end of this card.

## BibTeX entry and citation info

```bibtex
@misc{park2021klue,
      title={KLUE: Korean Language Understanding Evaluation},
      author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},
      year={2021},
      eprint={2105.09680},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
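For a quick sanity check of the setup above, here is a short fill-mask example using the widget sentence from this card. The pipeline resolves the tokenizer via `AutoTokenizer` (i.e., to `BertTokenizer`, as noted above); the predictions printed are simply whatever the model returns.

```python
from transformers import pipeline

# Fill-mask pipeline; AutoTokenizer resolves to BertTokenizer for this model.
fill_mask = pipeline("fill-mask", model="klue/roberta-base")

# Widget sentence from the card: "The capital of South Korea is [MASK]."
print(fill_mask("대한민국의 수도는 [MASK] 입니다."))
```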
avsolatorio/GIST-large-Embedding-v0
avsolatorio
"2024-02-28T00:34:23Z"
106,567
6
sentence-transformers
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "mteb", "sentence-similarity", "en", "arxiv:2402.16829", "arxiv:2212.09741", "license:mit", "model-index", "endpoints_compatible", "region:us", "has_space" ]
sentence-similarity
"2024-02-14T18:26:25Z"
--- language: - en library_name: sentence-transformers license: mit pipeline_tag: sentence-similarity tags: - feature-extraction - mteb - sentence-similarity - sentence-transformers model-index: - name: GIST-large-Embedding-v0 results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.5820895522388 - type: ap value: 38.32190121241783 - type: f1 value: 69.44777155231054 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 93.40514999999998 - type: ap value: 90.2011565132406 - type: f1 value: 93.39486246843605 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 49.05999999999999 - type: f1 value: 48.58702718571088 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 38.407000000000004 - type: map_at_10 value: 54.822 - type: map_at_100 value: 55.387 - type: map_at_1000 value: 55.388999999999996 - type: map_at_3 value: 50.308 - type: map_at_5 value: 53.199 - type: mrr_at_1 value: 39.900000000000006 - type: mrr_at_10 value: 55.385 - type: mrr_at_100 value: 55.936 - type: mrr_at_1000 value: 55.93900000000001 - type: mrr_at_3 value: 50.853 - type: mrr_at_5 value: 53.738 - type: ndcg_at_1 value: 38.407000000000004 - type: ndcg_at_10 value: 63.38 - type: ndcg_at_100 value: 65.52900000000001 - type: ndcg_at_1000 value: 65.58800000000001 - type: ndcg_at_3 value: 54.26 - type: ndcg_at_5 value: 59.488 - type: precision_at_1 value: 38.407000000000004 - type: precision_at_10 value: 9.04 - type: precision_at_100 value: 0.992 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 21.906 - type: precision_at_5 value: 15.690000000000001 - type: recall_at_1 value: 38.407000000000004 - type: recall_at_10 value: 90.398 - type: recall_at_100 value: 99.21799999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 65.718 - type: recall_at_5 value: 78.45 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.49766333679089 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 42.57731111438094 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 64.70120072857361 - type: mrr value: 77.86714593501297 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 90.73821860690765 - type: cos_sim_spearman value: 89.17070651383446 - type: euclidean_pearson value: 88.28303958293029 - type: euclidean_spearman value: 88.81889126856979 - type: manhattan_pearson value: 88.09080621828731 - type: manhattan_spearman 
value: 88.55924679817751 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 88.10064935064933 - type: f1 value: 88.08460758973867 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 39.338228337929976 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.179156232378226 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 33.440999999999995 - type: map_at_10 value: 45.495000000000005 - type: map_at_100 value: 47.132000000000005 - type: map_at_1000 value: 47.253 - type: map_at_3 value: 41.766 - type: map_at_5 value: 43.873 - type: mrr_at_1 value: 40.772999999999996 - type: mrr_at_10 value: 51.627 - type: mrr_at_100 value: 52.364 - type: mrr_at_1000 value: 52.397000000000006 - type: mrr_at_3 value: 48.951 - type: mrr_at_5 value: 50.746 - type: ndcg_at_1 value: 40.772999999999996 - type: ndcg_at_10 value: 52.306 - type: ndcg_at_100 value: 57.753 - type: ndcg_at_1000 value: 59.36900000000001 - type: ndcg_at_3 value: 47.177 - type: ndcg_at_5 value: 49.71 - type: precision_at_1 value: 40.772999999999996 - type: precision_at_10 value: 10.129000000000001 - type: precision_at_100 value: 1.617 - type: precision_at_1000 value: 0.208 - type: precision_at_3 value: 22.985 - type: precision_at_5 value: 16.652 - type: recall_at_1 value: 33.440999999999995 - type: recall_at_10 value: 65.121 - type: recall_at_100 value: 87.55199999999999 - type: recall_at_1000 value: 97.41300000000001 - type: recall_at_3 value: 49.958999999999996 - type: recall_at_5 value: 57.14900000000001 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 32.126 - type: map_at_10 value: 42.856 - type: map_at_100 value: 44.134 - type: map_at_1000 value: 44.274 - type: map_at_3 value: 39.594 - type: map_at_5 value: 41.504999999999995 - type: mrr_at_1 value: 40.127 - type: mrr_at_10 value: 48.736000000000004 - type: mrr_at_100 value: 49.303999999999995 - type: mrr_at_1000 value: 49.356 - type: mrr_at_3 value: 46.263 - type: mrr_at_5 value: 47.878 - type: ndcg_at_1 value: 40.127 - type: ndcg_at_10 value: 48.695 - type: ndcg_at_100 value: 52.846000000000004 - type: ndcg_at_1000 value: 54.964 - type: ndcg_at_3 value: 44.275 - type: ndcg_at_5 value: 46.54 - type: precision_at_1 value: 40.127 - type: precision_at_10 value: 9.229 - type: precision_at_100 value: 1.473 - type: precision_at_1000 value: 0.19499999999999998 - type: precision_at_3 value: 21.444 - type: precision_at_5 value: 15.389 - type: recall_at_1 value: 32.126 - type: recall_at_10 value: 58.971 - type: recall_at_100 value: 76.115 - type: recall_at_1000 value: 89.556 - type: recall_at_3 value: 45.891 - type: recall_at_5 value: 52.242 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 41.312 - type: map_at_10 value: 54.510000000000005 - type: map_at_100 value: 
55.544000000000004 - type: map_at_1000 value: 55.593 - type: map_at_3 value: 50.859 - type: map_at_5 value: 52.839999999999996 - type: mrr_at_1 value: 47.147 - type: mrr_at_10 value: 57.678 - type: mrr_at_100 value: 58.287 - type: mrr_at_1000 value: 58.312 - type: mrr_at_3 value: 55.025999999999996 - type: mrr_at_5 value: 56.55 - type: ndcg_at_1 value: 47.147 - type: ndcg_at_10 value: 60.672000000000004 - type: ndcg_at_100 value: 64.411 - type: ndcg_at_1000 value: 65.35499999999999 - type: ndcg_at_3 value: 54.643 - type: ndcg_at_5 value: 57.461 - type: precision_at_1 value: 47.147 - type: precision_at_10 value: 9.881 - type: precision_at_100 value: 1.27 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 24.556 - type: precision_at_5 value: 16.814999999999998 - type: recall_at_1 value: 41.312 - type: recall_at_10 value: 75.62299999999999 - type: recall_at_100 value: 91.388 - type: recall_at_1000 value: 98.08 - type: recall_at_3 value: 59.40299999999999 - type: recall_at_5 value: 66.43900000000001 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.609 - type: map_at_10 value: 37.614 - type: map_at_100 value: 38.584 - type: map_at_1000 value: 38.652 - type: map_at_3 value: 34.731 - type: map_at_5 value: 36.308 - type: mrr_at_1 value: 29.944 - type: mrr_at_10 value: 39.829 - type: mrr_at_100 value: 40.659 - type: mrr_at_1000 value: 40.709 - type: mrr_at_3 value: 37.269000000000005 - type: mrr_at_5 value: 38.625 - type: ndcg_at_1 value: 29.944 - type: ndcg_at_10 value: 43.082 - type: ndcg_at_100 value: 47.857 - type: ndcg_at_1000 value: 49.612 - type: ndcg_at_3 value: 37.578 - type: ndcg_at_5 value: 40.135 - type: precision_at_1 value: 29.944 - type: precision_at_10 value: 6.678000000000001 - type: precision_at_100 value: 0.951 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 16.045 - type: precision_at_5 value: 11.073 - type: recall_at_1 value: 27.609 - type: recall_at_10 value: 57.718 - type: recall_at_100 value: 79.768 - type: recall_at_1000 value: 92.868 - type: recall_at_3 value: 42.876 - type: recall_at_5 value: 49.104 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 18.071 - type: map_at_10 value: 27.471 - type: map_at_100 value: 28.71 - type: map_at_1000 value: 28.833 - type: map_at_3 value: 24.698 - type: map_at_5 value: 26.461000000000002 - type: mrr_at_1 value: 22.387999999999998 - type: mrr_at_10 value: 32.522 - type: mrr_at_100 value: 33.393 - type: mrr_at_1000 value: 33.455 - type: mrr_at_3 value: 29.830000000000002 - type: mrr_at_5 value: 31.472 - type: ndcg_at_1 value: 22.387999999999998 - type: ndcg_at_10 value: 33.278999999999996 - type: ndcg_at_100 value: 39.043 - type: ndcg_at_1000 value: 41.763 - type: ndcg_at_3 value: 28.310999999999996 - type: ndcg_at_5 value: 31.007 - type: precision_at_1 value: 22.387999999999998 - type: precision_at_10 value: 6.157 - type: precision_at_100 value: 1.042 - type: precision_at_1000 value: 0.14200000000000002 - type: precision_at_3 value: 13.972000000000001 - type: precision_at_5 value: 10.274 - type: recall_at_1 value: 18.071 - type: recall_at_10 value: 46.025 - type: recall_at_100 value: 71.153 - type: recall_at_1000 value: 90.232 - type: recall_at_3 value: 32.311 - type: recall_at_5 value: 39.296 - task: type: Retrieval dataset: 
type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 30.813000000000002 - type: map_at_10 value: 42.594 - type: map_at_100 value: 43.949 - type: map_at_1000 value: 44.052 - type: map_at_3 value: 39.1 - type: map_at_5 value: 41.111 - type: mrr_at_1 value: 37.824999999999996 - type: mrr_at_10 value: 48.06 - type: mrr_at_100 value: 48.91 - type: mrr_at_1000 value: 48.946 - type: mrr_at_3 value: 45.509 - type: mrr_at_5 value: 47.073 - type: ndcg_at_1 value: 37.824999999999996 - type: ndcg_at_10 value: 48.882 - type: ndcg_at_100 value: 54.330999999999996 - type: ndcg_at_1000 value: 56.120999999999995 - type: ndcg_at_3 value: 43.529 - type: ndcg_at_5 value: 46.217999999999996 - type: precision_at_1 value: 37.824999999999996 - type: precision_at_10 value: 8.845 - type: precision_at_100 value: 1.34 - type: precision_at_1000 value: 0.168 - type: precision_at_3 value: 20.757 - type: precision_at_5 value: 14.802999999999999 - type: recall_at_1 value: 30.813000000000002 - type: recall_at_10 value: 61.895999999999994 - type: recall_at_100 value: 84.513 - type: recall_at_1000 value: 95.817 - type: recall_at_3 value: 47.099000000000004 - type: recall_at_5 value: 54.031 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 25.735999999999997 - type: map_at_10 value: 36.799 - type: map_at_100 value: 38.246 - type: map_at_1000 value: 38.353 - type: map_at_3 value: 33.133 - type: map_at_5 value: 34.954 - type: mrr_at_1 value: 31.849 - type: mrr_at_10 value: 41.928 - type: mrr_at_100 value: 42.846000000000004 - type: mrr_at_1000 value: 42.894 - type: mrr_at_3 value: 39.117000000000004 - type: mrr_at_5 value: 40.521 - type: ndcg_at_1 value: 31.849 - type: ndcg_at_10 value: 43.143 - type: ndcg_at_100 value: 48.963 - type: ndcg_at_1000 value: 51.041000000000004 - type: ndcg_at_3 value: 37.218 - type: ndcg_at_5 value: 39.542 - type: precision_at_1 value: 31.849 - type: precision_at_10 value: 8.231 - type: precision_at_100 value: 1.277 - type: precision_at_1000 value: 0.164 - type: precision_at_3 value: 18.037 - type: precision_at_5 value: 12.945 - type: recall_at_1 value: 25.735999999999997 - type: recall_at_10 value: 56.735 - type: recall_at_100 value: 81.04 - type: recall_at_1000 value: 94.845 - type: recall_at_3 value: 40.239999999999995 - type: recall_at_5 value: 46.378 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.580333333333336 - type: map_at_10 value: 37.70558333333334 - type: map_at_100 value: 38.94941666666667 - type: map_at_1000 value: 39.062083333333334 - type: map_at_3 value: 34.63333333333334 - type: map_at_5 value: 36.35241666666666 - type: mrr_at_1 value: 32.64866666666667 - type: mrr_at_10 value: 42.018499999999996 - type: mrr_at_100 value: 42.83391666666666 - type: mrr_at_1000 value: 42.884166666666665 - type: mrr_at_3 value: 39.476499999999994 - type: mrr_at_5 value: 40.96983333333334 - type: ndcg_at_1 value: 32.64866666666667 - type: ndcg_at_10 value: 43.43866666666667 - type: ndcg_at_100 value: 48.569833333333335 - type: ndcg_at_1000 value: 50.6495 - type: ndcg_at_3 value: 38.327166666666656 - type: ndcg_at_5 value: 40.76941666666667 - type: precision_at_1 value: 32.64866666666667 - type: precision_at_10 value: 7.652333333333332 - type: precision_at_100 value: 
1.2066666666666666 - type: precision_at_1000 value: 0.15841666666666668 - type: precision_at_3 value: 17.75108333333333 - type: precision_at_5 value: 12.641916666666669 - type: recall_at_1 value: 27.580333333333336 - type: recall_at_10 value: 56.02591666666667 - type: recall_at_100 value: 78.317 - type: recall_at_1000 value: 92.52608333333332 - type: recall_at_3 value: 41.84283333333333 - type: recall_at_5 value: 48.105666666666664 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.876 - type: map_at_10 value: 34.521 - type: map_at_100 value: 35.581 - type: map_at_1000 value: 35.674 - type: map_at_3 value: 32.501000000000005 - type: map_at_5 value: 33.602 - type: mrr_at_1 value: 31.441999999999997 - type: mrr_at_10 value: 37.669999999999995 - type: mrr_at_100 value: 38.523 - type: mrr_at_1000 value: 38.59 - type: mrr_at_3 value: 35.762 - type: mrr_at_5 value: 36.812 - type: ndcg_at_1 value: 31.441999999999997 - type: ndcg_at_10 value: 38.46 - type: ndcg_at_100 value: 43.479 - type: ndcg_at_1000 value: 45.858 - type: ndcg_at_3 value: 34.668 - type: ndcg_at_5 value: 36.416 - type: precision_at_1 value: 31.441999999999997 - type: precision_at_10 value: 5.782 - type: precision_at_100 value: 0.91 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_3 value: 14.417 - type: precision_at_5 value: 9.876999999999999 - type: recall_at_1 value: 27.876 - type: recall_at_10 value: 47.556 - type: recall_at_100 value: 70.39699999999999 - type: recall_at_1000 value: 87.969 - type: recall_at_3 value: 37.226 - type: recall_at_5 value: 41.43 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 18.854000000000003 - type: map_at_10 value: 26.632 - type: map_at_100 value: 27.849 - type: map_at_1000 value: 27.977 - type: map_at_3 value: 24.089 - type: map_at_5 value: 25.477 - type: mrr_at_1 value: 22.987 - type: mrr_at_10 value: 30.781999999999996 - type: mrr_at_100 value: 31.746000000000002 - type: mrr_at_1000 value: 31.818 - type: mrr_at_3 value: 28.43 - type: mrr_at_5 value: 29.791 - type: ndcg_at_1 value: 22.987 - type: ndcg_at_10 value: 31.585 - type: ndcg_at_100 value: 37.32 - type: ndcg_at_1000 value: 40.072 - type: ndcg_at_3 value: 27.058 - type: ndcg_at_5 value: 29.137999999999998 - type: precision_at_1 value: 22.987 - type: precision_at_10 value: 5.76 - type: precision_at_100 value: 1.018 - type: precision_at_1000 value: 0.14400000000000002 - type: precision_at_3 value: 12.767000000000001 - type: precision_at_5 value: 9.257 - type: recall_at_1 value: 18.854000000000003 - type: recall_at_10 value: 42.349 - type: recall_at_100 value: 68.15299999999999 - type: recall_at_1000 value: 87.44 - type: recall_at_3 value: 29.715999999999998 - type: recall_at_5 value: 35.085 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 28.094 - type: map_at_10 value: 38.22 - type: map_at_100 value: 39.352 - type: map_at_1000 value: 39.452 - type: map_at_3 value: 35.339 - type: map_at_5 value: 36.78 - type: mrr_at_1 value: 33.022 - type: mrr_at_10 value: 42.466 - type: mrr_at_100 value: 43.3 - type: mrr_at_1000 value: 43.356 - type: mrr_at_3 value: 40.159 - type: mrr_at_5 value: 41.272999999999996 - type: ndcg_at_1 value: 33.022 - type: ndcg_at_10 value: 43.976 
- type: ndcg_at_100 value: 49.008 - type: ndcg_at_1000 value: 51.154999999999994 - type: ndcg_at_3 value: 38.891 - type: ndcg_at_5 value: 40.897 - type: precision_at_1 value: 33.022 - type: precision_at_10 value: 7.396999999999999 - type: precision_at_100 value: 1.1199999999999999 - type: precision_at_1000 value: 0.14200000000000002 - type: precision_at_3 value: 17.724 - type: precision_at_5 value: 12.239 - type: recall_at_1 value: 28.094 - type: recall_at_10 value: 57.162 - type: recall_at_100 value: 78.636 - type: recall_at_1000 value: 93.376 - type: recall_at_3 value: 43.328 - type: recall_at_5 value: 48.252 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.937 - type: map_at_10 value: 34.82 - type: map_at_100 value: 36.405 - type: map_at_1000 value: 36.626 - type: map_at_3 value: 31.548 - type: map_at_5 value: 33.355000000000004 - type: mrr_at_1 value: 30.435000000000002 - type: mrr_at_10 value: 39.946 - type: mrr_at_100 value: 40.873 - type: mrr_at_1000 value: 40.910000000000004 - type: mrr_at_3 value: 37.088 - type: mrr_at_5 value: 38.808 - type: ndcg_at_1 value: 30.435000000000002 - type: ndcg_at_10 value: 41.25 - type: ndcg_at_100 value: 47.229 - type: ndcg_at_1000 value: 49.395 - type: ndcg_at_3 value: 35.801 - type: ndcg_at_5 value: 38.457 - type: precision_at_1 value: 30.435000000000002 - type: precision_at_10 value: 8.083 - type: precision_at_100 value: 1.601 - type: precision_at_1000 value: 0.247 - type: precision_at_3 value: 17.061999999999998 - type: precision_at_5 value: 12.767000000000001 - type: recall_at_1 value: 24.937 - type: recall_at_10 value: 53.905 - type: recall_at_100 value: 80.607 - type: recall_at_1000 value: 93.728 - type: recall_at_3 value: 38.446000000000005 - type: recall_at_5 value: 45.188 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.095000000000002 - type: map_at_10 value: 30.935000000000002 - type: map_at_100 value: 31.907000000000004 - type: map_at_1000 value: 32.006 - type: map_at_3 value: 28.242 - type: map_at_5 value: 29.963 - type: mrr_at_1 value: 23.845 - type: mrr_at_10 value: 32.978 - type: mrr_at_100 value: 33.802 - type: mrr_at_1000 value: 33.867000000000004 - type: mrr_at_3 value: 30.314000000000004 - type: mrr_at_5 value: 32.089 - type: ndcg_at_1 value: 23.845 - type: ndcg_at_10 value: 35.934 - type: ndcg_at_100 value: 40.598 - type: ndcg_at_1000 value: 43.089 - type: ndcg_at_3 value: 30.776999999999997 - type: ndcg_at_5 value: 33.711999999999996 - type: precision_at_1 value: 23.845 - type: precision_at_10 value: 5.656 - type: precision_at_100 value: 0.861 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 13.247 - type: precision_at_5 value: 9.612 - type: recall_at_1 value: 22.095000000000002 - type: recall_at_10 value: 49.25 - type: recall_at_100 value: 70.482 - type: recall_at_1000 value: 88.98899999999999 - type: recall_at_3 value: 35.619 - type: recall_at_5 value: 42.674 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 14.154 - type: map_at_10 value: 24.654999999999998 - type: map_at_100 value: 26.723999999999997 - type: map_at_1000 value: 26.912000000000003 - type: map_at_3 value: 20.4 - type: map_at_5 value: 22.477 - type: mrr_at_1 value: 32.117000000000004 - 
type: mrr_at_10 value: 44.590999999999994 - type: mrr_at_100 value: 45.425 - type: mrr_at_1000 value: 45.456 - type: mrr_at_3 value: 41.281 - type: mrr_at_5 value: 43.219 - type: ndcg_at_1 value: 32.117000000000004 - type: ndcg_at_10 value: 33.994 - type: ndcg_at_100 value: 41.438 - type: ndcg_at_1000 value: 44.611000000000004 - type: ndcg_at_3 value: 27.816000000000003 - type: ndcg_at_5 value: 29.816 - type: precision_at_1 value: 32.117000000000004 - type: precision_at_10 value: 10.756 - type: precision_at_100 value: 1.8679999999999999 - type: precision_at_1000 value: 0.246 - type: precision_at_3 value: 20.803 - type: precision_at_5 value: 15.987000000000002 - type: recall_at_1 value: 14.154 - type: recall_at_10 value: 40.489999999999995 - type: recall_at_100 value: 65.635 - type: recall_at_1000 value: 83.276 - type: recall_at_3 value: 25.241000000000003 - type: recall_at_5 value: 31.211 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 9.332 - type: map_at_10 value: 20.462 - type: map_at_100 value: 29.473 - type: map_at_1000 value: 31.215 - type: map_at_3 value: 14.466999999999999 - type: map_at_5 value: 16.922 - type: mrr_at_1 value: 69.5 - type: mrr_at_10 value: 77.039 - type: mrr_at_100 value: 77.265 - type: mrr_at_1000 value: 77.271 - type: mrr_at_3 value: 75.5 - type: mrr_at_5 value: 76.4 - type: ndcg_at_1 value: 57.125 - type: ndcg_at_10 value: 42.958 - type: ndcg_at_100 value: 48.396 - type: ndcg_at_1000 value: 55.897 - type: ndcg_at_3 value: 47.188 - type: ndcg_at_5 value: 44.376 - type: precision_at_1 value: 69.5 - type: precision_at_10 value: 34.5 - type: precision_at_100 value: 11.18 - type: precision_at_1000 value: 2.13 - type: precision_at_3 value: 51.083 - type: precision_at_5 value: 43.1 - type: recall_at_1 value: 9.332 - type: recall_at_10 value: 26.422 - type: recall_at_100 value: 56.098000000000006 - type: recall_at_1000 value: 79.66 - type: recall_at_3 value: 15.703 - type: recall_at_5 value: 19.644000000000002 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 54.72 - type: f1 value: 49.67819606587526 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 74.97 - type: map_at_10 value: 82.956 - type: map_at_100 value: 83.193 - type: map_at_1000 value: 83.208 - type: map_at_3 value: 81.837 - type: map_at_5 value: 82.57 - type: mrr_at_1 value: 80.783 - type: mrr_at_10 value: 87.546 - type: mrr_at_100 value: 87.627 - type: mrr_at_1000 value: 87.63 - type: mrr_at_3 value: 86.79400000000001 - type: mrr_at_5 value: 87.32799999999999 - type: ndcg_at_1 value: 80.783 - type: ndcg_at_10 value: 86.54899999999999 - type: ndcg_at_100 value: 87.355 - type: ndcg_at_1000 value: 87.629 - type: ndcg_at_3 value: 84.82 - type: ndcg_at_5 value: 85.83800000000001 - type: precision_at_1 value: 80.783 - type: precision_at_10 value: 10.327 - type: precision_at_100 value: 1.094 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 32.218 - type: precision_at_5 value: 20.012 - type: recall_at_1 value: 74.97 - type: recall_at_10 value: 93.072 - type: recall_at_100 value: 96.218 - type: recall_at_1000 value: 97.991 - type: recall_at_3 value: 88.357 - type: recall_at_5 value: 90.983 - task: type: Retrieval dataset: type: fiqa name: MTEB 
FiQA2018 config: default split: test revision: None metrics: - type: map_at_1 value: 21.12 - type: map_at_10 value: 35.908 - type: map_at_100 value: 37.895 - type: map_at_1000 value: 38.068000000000005 - type: map_at_3 value: 31.189 - type: map_at_5 value: 33.908 - type: mrr_at_1 value: 42.901 - type: mrr_at_10 value: 52.578 - type: mrr_at_100 value: 53.308 - type: mrr_at_1000 value: 53.342 - type: mrr_at_3 value: 50.385999999999996 - type: mrr_at_5 value: 51.62799999999999 - type: ndcg_at_1 value: 42.901 - type: ndcg_at_10 value: 44.302 - type: ndcg_at_100 value: 51.132999999999996 - type: ndcg_at_1000 value: 53.848 - type: ndcg_at_3 value: 40.464 - type: ndcg_at_5 value: 41.743 - type: precision_at_1 value: 42.901 - type: precision_at_10 value: 12.423 - type: precision_at_100 value: 1.968 - type: precision_at_1000 value: 0.246 - type: precision_at_3 value: 27.622999999999998 - type: precision_at_5 value: 20.278 - type: recall_at_1 value: 21.12 - type: recall_at_10 value: 52.091 - type: recall_at_100 value: 77.062 - type: recall_at_1000 value: 93.082 - type: recall_at_3 value: 37.223 - type: recall_at_5 value: 43.826 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 38.940000000000005 - type: map_at_10 value: 62.239999999999995 - type: map_at_100 value: 63.141000000000005 - type: map_at_1000 value: 63.205999999999996 - type: map_at_3 value: 58.738 - type: map_at_5 value: 60.924 - type: mrr_at_1 value: 77.88000000000001 - type: mrr_at_10 value: 83.7 - type: mrr_at_100 value: 83.882 - type: mrr_at_1000 value: 83.889 - type: mrr_at_3 value: 82.748 - type: mrr_at_5 value: 83.381 - type: ndcg_at_1 value: 77.88000000000001 - type: ndcg_at_10 value: 70.462 - type: ndcg_at_100 value: 73.564 - type: ndcg_at_1000 value: 74.78099999999999 - type: ndcg_at_3 value: 65.524 - type: ndcg_at_5 value: 68.282 - type: precision_at_1 value: 77.88000000000001 - type: precision_at_10 value: 14.81 - type: precision_at_100 value: 1.7229999999999999 - type: precision_at_1000 value: 0.188 - type: precision_at_3 value: 42.083999999999996 - type: precision_at_5 value: 27.43 - type: recall_at_1 value: 38.940000000000005 - type: recall_at_10 value: 74.051 - type: recall_at_100 value: 86.158 - type: recall_at_1000 value: 94.146 - type: recall_at_3 value: 63.126000000000005 - type: recall_at_5 value: 68.575 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 91.23440000000001 - type: ap value: 87.33490392265892 - type: f1 value: 91.21374626021836 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 22.137999999999998 - type: map_at_10 value: 34.471000000000004 - type: map_at_100 value: 35.634 - type: map_at_1000 value: 35.685 - type: map_at_3 value: 30.587999999999997 - type: map_at_5 value: 32.812999999999995 - type: mrr_at_1 value: 22.736 - type: mrr_at_10 value: 35.092 - type: mrr_at_100 value: 36.193999999999996 - type: mrr_at_1000 value: 36.238 - type: mrr_at_3 value: 31.28 - type: mrr_at_5 value: 33.498 - type: ndcg_at_1 value: 22.736 - type: ndcg_at_10 value: 41.388999999999996 - type: ndcg_at_100 value: 46.967999999999996 - type: ndcg_at_1000 value: 48.178 - type: ndcg_at_3 value: 33.503 - type: ndcg_at_5 value: 37.484 - type: precision_at_1 value: 22.736 - type: precision_at_10 value: 6.54 - 
type: precision_at_100 value: 0.9339999999999999 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.249999999999998 - type: precision_at_5 value: 10.562000000000001 - type: recall_at_1 value: 22.137999999999998 - type: recall_at_10 value: 62.629999999999995 - type: recall_at_100 value: 88.375 - type: recall_at_1000 value: 97.529 - type: recall_at_3 value: 41.245 - type: recall_at_5 value: 50.808 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 95.25079799361606 - type: f1 value: 95.00726023695032 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 78.23757409940721 - type: f1 value: 58.534958803195714 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 76.20040349697378 - type: f1 value: 74.31261149784696 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 79.35104236718227 - type: f1 value: 79.7373049864316 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 34.478828180753126 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 32.25696147904426 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.82488548405117 - type: mrr value: 34.066706809031096 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.557 - type: map_at_10 value: 15.055 - type: map_at_100 value: 19.575 - type: map_at_1000 value: 21.267 - type: map_at_3 value: 10.86 - type: map_at_5 value: 12.83 - type: mrr_at_1 value: 50.464 - type: mrr_at_10 value: 59.050999999999995 - type: mrr_at_100 value: 59.436 - type: mrr_at_1000 value: 59.476 - type: mrr_at_3 value: 56.811 - type: mrr_at_5 value: 58.08 - type: ndcg_at_1 value: 47.988 - type: ndcg_at_10 value: 38.645 - type: ndcg_at_100 value: 36.339 - type: ndcg_at_1000 value: 45.279 - type: ndcg_at_3 value: 43.35 - type: ndcg_at_5 value: 41.564 - type: precision_at_1 value: 49.845 - type: precision_at_10 value: 28.544999999999998 - type: precision_at_100 value: 9.322 - type: precision_at_1000 value: 2.258 - type: precision_at_3 value: 40.144000000000005 - type: precision_at_5 value: 35.913000000000004 - type: recall_at_1 value: 6.557 - type: recall_at_10 value: 19.5 - type: recall_at_100 value: 37.153999999999996 - type: recall_at_1000 value: 69.581 - type: recall_at_3 value: 12.133 - type: recall_at_5 value: 15.43 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: None metrics: - type: map_at_1 value: 31.740000000000002 - 
type: map_at_10 value: 48.150999999999996 - type: map_at_100 value: 49.125 - type: map_at_1000 value: 49.149 - type: map_at_3 value: 43.645 - type: map_at_5 value: 46.417 - type: mrr_at_1 value: 35.892 - type: mrr_at_10 value: 50.524 - type: mrr_at_100 value: 51.232 - type: mrr_at_1000 value: 51.24999999999999 - type: mrr_at_3 value: 46.852 - type: mrr_at_5 value: 49.146 - type: ndcg_at_1 value: 35.892 - type: ndcg_at_10 value: 56.08800000000001 - type: ndcg_at_100 value: 60.077000000000005 - type: ndcg_at_1000 value: 60.632 - type: ndcg_at_3 value: 47.765 - type: ndcg_at_5 value: 52.322 - type: precision_at_1 value: 35.892 - type: precision_at_10 value: 9.296 - type: precision_at_100 value: 1.154 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 21.92 - type: precision_at_5 value: 15.781999999999998 - type: recall_at_1 value: 31.740000000000002 - type: recall_at_10 value: 77.725 - type: recall_at_100 value: 94.841 - type: recall_at_1000 value: 99.003 - type: recall_at_3 value: 56.407 - type: recall_at_5 value: 66.848 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 71.429 - type: map_at_10 value: 85.42699999999999 - type: map_at_100 value: 86.063 - type: map_at_1000 value: 86.077 - type: map_at_3 value: 82.573 - type: map_at_5 value: 84.371 - type: mrr_at_1 value: 82.34 - type: mrr_at_10 value: 88.247 - type: mrr_at_100 value: 88.357 - type: mrr_at_1000 value: 88.357 - type: mrr_at_3 value: 87.38 - type: mrr_at_5 value: 87.981 - type: ndcg_at_1 value: 82.34 - type: ndcg_at_10 value: 88.979 - type: ndcg_at_100 value: 90.18599999999999 - type: ndcg_at_1000 value: 90.254 - type: ndcg_at_3 value: 86.378 - type: ndcg_at_5 value: 87.821 - type: precision_at_1 value: 82.34 - type: precision_at_10 value: 13.482 - type: precision_at_100 value: 1.537 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.852999999999994 - type: precision_at_5 value: 24.798000000000002 - type: recall_at_1 value: 71.429 - type: recall_at_10 value: 95.64099999999999 - type: recall_at_100 value: 99.723 - type: recall_at_1000 value: 99.98 - type: recall_at_3 value: 88.011 - type: recall_at_5 value: 92.246 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 60.62148584103299 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 63.2923987272903 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: None metrics: - type: map_at_1 value: 5.128 - type: map_at_10 value: 14.63 - type: map_at_100 value: 17.285 - type: map_at_1000 value: 17.676 - type: map_at_3 value: 9.993 - type: map_at_5 value: 12.286999999999999 - type: mrr_at_1 value: 25.4 - type: mrr_at_10 value: 38.423 - type: mrr_at_100 value: 39.497 - type: mrr_at_1000 value: 39.531 - type: mrr_at_3 value: 34.9 - type: mrr_at_5 value: 37.01 - type: ndcg_at_1 value: 25.4 - type: ndcg_at_10 value: 24.062 - type: ndcg_at_100 value: 33.823 - type: ndcg_at_1000 value: 39.663 - type: ndcg_at_3 value: 22.246 - type: ndcg_at_5 value: 19.761 - type: precision_at_1 value: 25.4 - type: precision_at_10 value: 12.85 - type: precision_at_100 value: 2.71 - type: precision_at_1000 value: 
0.41000000000000003 - type: precision_at_3 value: 21.4 - type: precision_at_5 value: 17.86 - type: recall_at_1 value: 5.128 - type: recall_at_10 value: 26.06 - type: recall_at_100 value: 54.993 - type: recall_at_1000 value: 83.165 - type: recall_at_3 value: 13.003 - type: recall_at_5 value: 18.117 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 87.5466779326323 - type: cos_sim_spearman value: 82.79782085421951 - type: euclidean_pearson value: 84.76929982677339 - type: euclidean_spearman value: 82.51802536005597 - type: manhattan_pearson value: 84.76736312526177 - type: manhattan_spearman value: 82.50799656335593 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 86.40486308108694 - type: cos_sim_spearman value: 77.12670500926937 - type: euclidean_pearson value: 85.23836845503847 - type: euclidean_spearman value: 78.41475117006176 - type: manhattan_pearson value: 85.24302039610805 - type: manhattan_spearman value: 78.4053162562707 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 88.83570289087565 - type: cos_sim_spearman value: 89.28563503553643 - type: euclidean_pearson value: 87.77516003996445 - type: euclidean_spearman value: 88.8656149534085 - type: manhattan_pearson value: 87.75568872417946 - type: manhattan_spearman value: 88.80445489340585 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 86.776406555485 - type: cos_sim_spearman value: 83.8288465070091 - type: euclidean_pearson value: 85.37827999808123 - type: euclidean_spearman value: 84.11079529992739 - type: manhattan_pearson value: 85.35336495689121 - type: manhattan_spearman value: 84.08618492649347 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 88.57644404820684 - type: cos_sim_spearman value: 89.69728364350713 - type: euclidean_pearson value: 88.28202320389443 - type: euclidean_spearman value: 88.9560567319321 - type: manhattan_pearson value: 88.29461100044172 - type: manhattan_spearman value: 88.96030920678558 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 85.05211938460621 - type: cos_sim_spearman value: 86.43413865667489 - type: euclidean_pearson value: 85.62760689259562 - type: euclidean_spearman value: 86.28867831982394 - type: manhattan_pearson value: 85.60828879163458 - type: manhattan_spearman value: 86.27823731462473 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 90.00254140466377 - type: cos_sim_spearman value: 89.66118745178284 - type: euclidean_pearson value: 89.46985446236553 - type: euclidean_spearman value: 88.92649032371526 - type: manhattan_pearson value: 89.49600028180247 - type: manhattan_spearman value: 88.86948431519099 - task: type: STS dataset: type: 
mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 68.93578321067938 - type: cos_sim_spearman value: 69.60639595839257 - type: euclidean_pearson value: 70.33485090574897 - type: euclidean_spearman value: 69.03380379185452 - type: manhattan_pearson value: 70.42097254943839 - type: manhattan_spearman value: 69.25296348304255 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 87.29588700755069 - type: cos_sim_spearman value: 88.30389489193672 - type: euclidean_pearson value: 87.60349838180346 - type: euclidean_spearman value: 87.91041868311692 - type: manhattan_pearson value: 87.59373630607907 - type: manhattan_spearman value: 87.88690174001724 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 87.8030655700857 - type: mrr value: 96.3950637234951 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: None metrics: - type: map_at_1 value: 60.028000000000006 - type: map_at_10 value: 69.855 - type: map_at_100 value: 70.257 - type: map_at_1000 value: 70.283 - type: map_at_3 value: 66.769 - type: map_at_5 value: 68.679 - type: mrr_at_1 value: 62.666999999999994 - type: mrr_at_10 value: 70.717 - type: mrr_at_100 value: 71.00800000000001 - type: mrr_at_1000 value: 71.033 - type: mrr_at_3 value: 68.389 - type: mrr_at_5 value: 69.939 - type: ndcg_at_1 value: 62.666999999999994 - type: ndcg_at_10 value: 74.715 - type: ndcg_at_100 value: 76.364 - type: ndcg_at_1000 value: 76.89399999999999 - type: ndcg_at_3 value: 69.383 - type: ndcg_at_5 value: 72.322 - type: precision_at_1 value: 62.666999999999994 - type: precision_at_10 value: 10.067 - type: precision_at_100 value: 1.09 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 27.111 - type: precision_at_5 value: 18.267 - type: recall_at_1 value: 60.028000000000006 - type: recall_at_10 value: 88.822 - type: recall_at_100 value: 96.167 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 74.367 - type: recall_at_5 value: 81.661 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.84554455445544 - type: cos_sim_ap value: 96.54482863244152 - type: cos_sim_f1 value: 92.13709677419355 - type: cos_sim_precision value: 92.88617886178862 - type: cos_sim_recall value: 91.4 - type: dot_accuracy value: 99.76039603960396 - type: dot_ap value: 93.20115278887057 - type: dot_f1 value: 87.92079207920793 - type: dot_precision value: 87.05882352941177 - type: dot_recall value: 88.8 - type: euclidean_accuracy value: 99.84950495049505 - type: euclidean_ap value: 96.53268343961348 - type: euclidean_f1 value: 92.23697650663942 - type: euclidean_precision value: 94.258872651357 - type: euclidean_recall value: 90.3 - type: manhattan_accuracy value: 99.85346534653465 - type: manhattan_ap value: 96.54495433438355 - type: manhattan_f1 value: 92.51012145748987 - type: manhattan_precision value: 93.64754098360656 - type: manhattan_recall value: 91.4 - type: max_accuracy value: 99.85346534653465 - type: 
max_ap value: 96.54495433438355 - type: max_f1 value: 92.51012145748987 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 66.46940443952006 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 36.396194493841584 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 54.881717673695555 - type: mrr value: 55.73439224174519 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.438177268254087 - type: cos_sim_spearman value: 30.96177698848688 - type: dot_pearson value: 30.513850376431435 - type: dot_spearman value: 29.932421046509706 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: None metrics: - type: map_at_1 value: 0.21 - type: map_at_10 value: 1.727 - type: map_at_100 value: 9.881 - type: map_at_1000 value: 24.245 - type: map_at_3 value: 0.615 - type: map_at_5 value: 0.966 - type: mrr_at_1 value: 78.0 - type: mrr_at_10 value: 87.333 - type: mrr_at_100 value: 87.333 - type: mrr_at_1000 value: 87.333 - type: mrr_at_3 value: 86.333 - type: mrr_at_5 value: 87.333 - type: ndcg_at_1 value: 74.0 - type: ndcg_at_10 value: 69.12700000000001 - type: ndcg_at_100 value: 53.893 - type: ndcg_at_1000 value: 49.639 - type: ndcg_at_3 value: 74.654 - type: ndcg_at_5 value: 73.232 - type: precision_at_1 value: 78.0 - type: precision_at_10 value: 72.8 - type: precision_at_100 value: 55.42 - type: precision_at_1000 value: 21.73 - type: precision_at_3 value: 79.333 - type: precision_at_5 value: 77.2 - type: recall_at_1 value: 0.21 - type: recall_at_10 value: 1.9709999999999999 - type: recall_at_100 value: 13.555 - type: recall_at_1000 value: 46.961999999999996 - type: recall_at_3 value: 0.66 - type: recall_at_5 value: 1.052 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.456 - type: map_at_10 value: 9.426 - type: map_at_100 value: 16.066 - type: map_at_1000 value: 17.652 - type: map_at_3 value: 5.2459999999999996 - type: map_at_5 value: 6.5360000000000005 - type: mrr_at_1 value: 34.694 - type: mrr_at_10 value: 47.666 - type: mrr_at_100 value: 48.681999999999995 - type: mrr_at_1000 value: 48.681999999999995 - type: mrr_at_3 value: 43.878 - type: mrr_at_5 value: 46.224 - type: ndcg_at_1 value: 31.633 - type: ndcg_at_10 value: 23.454 - type: ndcg_at_100 value: 36.616 - type: ndcg_at_1000 value: 48.596000000000004 - type: ndcg_at_3 value: 28.267999999999997 - type: ndcg_at_5 value: 25.630999999999997 - type: precision_at_1 value: 34.694 - type: precision_at_10 value: 20.204 - type: precision_at_100 value: 7.754999999999999 - type: precision_at_1000 value: 1.5709999999999997 - type: precision_at_3 value: 29.252 - type: precision_at_5 value: 24.898 - type: recall_at_1 value: 2.456 - type: recall_at_10 value: 14.951 - type: recall_at_100 value: 48.399 - type: recall_at_1000 value: 85.077 - type: recall_at_3 
value: 6.1370000000000005 - type: recall_at_5 value: 8.671 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.86240000000001 - type: ap value: 14.678570078747494 - type: f1 value: 55.295967793934445 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.17374080362195 - type: f1 value: 59.54410874861454 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.91227822485289 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.12523097097217 - type: cos_sim_ap value: 77.59606075943269 - type: cos_sim_f1 value: 71.11395646606915 - type: cos_sim_precision value: 69.07960199004975 - type: cos_sim_recall value: 73.27176781002639 - type: dot_accuracy value: 84.68736961316088 - type: dot_ap value: 68.47167450741459 - type: dot_f1 value: 64.42152354914874 - type: dot_precision value: 60.887949260042284 - type: dot_recall value: 68.3905013192612 - type: euclidean_accuracy value: 86.88084878106932 - type: euclidean_ap value: 77.27351204978599 - type: euclidean_f1 value: 70.99179716629381 - type: euclidean_precision value: 67.10526315789474 - type: euclidean_recall value: 75.35620052770449 - type: manhattan_accuracy value: 86.83316445133218 - type: manhattan_ap value: 77.21835357308716 - type: manhattan_f1 value: 71.05587004676349 - type: manhattan_precision value: 66.58210332103322 - type: manhattan_recall value: 76.17414248021109 - type: max_accuracy value: 87.12523097097217 - type: max_ap value: 77.59606075943269 - type: max_f1 value: 71.11395646606915 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.97232894787906 - type: cos_sim_ap value: 85.9613736469497 - type: cos_sim_f1 value: 78.40216655382532 - type: cos_sim_precision value: 72.97512437810946 - type: cos_sim_recall value: 84.70126270403449 - type: dot_accuracy value: 88.04866689952264 - type: dot_ap value: 83.15465089499936 - type: dot_f1 value: 76.32698287879329 - type: dot_precision value: 71.23223697378077 - type: dot_recall value: 82.20665229442562 - type: euclidean_accuracy value: 88.67543757519307 - type: euclidean_ap value: 85.4524355531532 - type: euclidean_f1 value: 77.78729106950081 - type: euclidean_precision value: 75.3009009009009 - type: euclidean_recall value: 80.44348629504158 - type: manhattan_accuracy value: 88.65991384328792 - type: manhattan_ap value: 85.43109069046837 - type: manhattan_f1 value: 77.72639551396425 - type: manhattan_precision value: 73.73402417962004 - type: manhattan_recall value: 82.17585463504774 - type: max_accuracy value: 88.97232894787906 - type: max_ap value: 85.9613736469497 - type: max_f1 value: 78.40216655382532 --- <h1 align="center">GIST Large Embedding v0</h1> *GISTEmbed: Guided In-sample 
Selection of Training Negatives for Text Embedding Fine-tuning* The model is fine-tuned on top of the [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) using the [MEDI dataset](https://github.com/xlang-ai/instructor-embedding.git) augmented with mined triplets from the [MTEB Classification](https://huggingface.co/mteb) training dataset (excluding data from the Amazon Polarity Classification task). The model does not require any instruction for generating embeddings. This means that queries for retrieval tasks can be directly encoded without crafting instructions. Technical paper: [GISTEmbed: Guided In-sample Selection of Training Negatives for Text Embedding Fine-tuning](https://arxiv.org/abs/2402.16829) # Data The dataset used is a compilation of the MEDI and MTEB Classification training datasets. Third-party datasets may be subject to additional terms and conditions under their associated licenses. A HuggingFace Dataset version of the compiled dataset, and the specific revision used to train the model, are available: - Dataset: [avsolatorio/medi-data-mteb_avs_triplets](https://huggingface.co/datasets/avsolatorio/medi-data-mteb_avs_triplets) - Revision: 238a0499b6e6b690cc64ea56fde8461daa8341bb The dataset contains a `task_type` key, which can be used to select only the mteb classification tasks (prefixed with `mteb_`); a loading sketch is provided at the end of this card. The **MEDI Dataset** is published in the following paper: [One Embedder, Any Task: Instruction-Finetuned Text Embeddings](https://arxiv.org/abs/2212.09741). The MTEB Benchmark results of the GIST embedding model, compared with the base model, suggest that the fine-tuning dataset has perturbed the model considerably, resulting in significant improvements on certain tasks while degrading performance on others. The retrieval performance for the TRECCOVID task is of note. The fine-tuning dataset does not contain significant knowledge about COVID-19, which could have caused the observed performance degradation. We found some evidence, detailed in the paper, that thematic coverage of the fine-tuning data can affect downstream performance. # Usage The model can be easily loaded using the Sentence Transformers library. ```python import torch.nn.functional as F from sentence_transformers import SentenceTransformer revision = None # Replace with the specific revision to ensure reproducibility if the model is updated. model = SentenceTransformer("avsolatorio/GIST-large-Embedding-v0", revision=revision) texts = [ "Illustration of the REaLTabFormer model. The left block shows the non-relational tabular data model using GPT-2 with a causal LM head. In contrast, the right block shows how a relational dataset's child table is modeled using a sequence-to-sequence (Seq2Seq) model. The Seq2Seq model uses the observations in the parent table to condition the generation of the observations in the child table. The trained GPT-2 model on the parent table, with weights frozen, is also used as the encoder in the Seq2Seq model.", "Predicting human mobility holds significant practical value, with applications ranging from enhancing disaster risk planning to simulating epidemic spread. In this paper, we present the GeoFormer, a decoder-only transformer model adapted from the GPT architecture to forecast human mobility.", "As the economies of Southeast Asia continue adopting digital technologies, policy makers increasingly ask how to prepare the workforce for emerging labor demands.
However, little is known about the skills that workers need to adapt to these changes" ] # Compute embeddings embeddings = model.encode(texts, convert_to_tensor=True) # Compute cosine-similarity for each pair of sentences scores = F.cosine_similarity(embeddings.unsqueeze(1), embeddings.unsqueeze(0), dim=-1) print(scores.cpu().numpy()) ``` # Training Parameters Below are the training parameters used to fine-tune the model: ``` Epochs = 40 Warmup ratio = 0.1 Learning rate = 5e-6 Batch size = 16 Checkpoint step = 171000 Contrastive loss temperature = 0.01 ``` # Evaluation The model was evaluated using the [MTEB Evaluation](https://huggingface.co/mteb) suite. # Citation Please cite our work if you use GISTEmbed or the datasets we published in your projects or research. 🤗 ``` @article{solatorio2024gistembed, title={GISTEmbed: Guided In-sample Selection of Training Negatives for Text Embedding Fine-tuning}, author={Aivin V. Solatorio}, journal={arXiv preprint arXiv:2402.16829}, year={2024}, URL={https://arxiv.org/abs/2402.16829}, eprint={2402.16829}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` # Acknowledgements This work is supported by the "KCP IV - Exploring Data Use in the Development Economics Literature using Large Language Models (AI and LLMs)" project funded by the [Knowledge for Change Program (KCP)](https://www.worldbank.org/en/programs/knowledge-for-change) of the World Bank - RA-P503405-RESE-TF0C3444. The findings, interpretations, and conclusions expressed in this material are entirely those of the authors. They do not necessarily represent the views of the International Bank for Reconstruction and Development/World Bank and its affiliated organizations, or those of the Executive Directors of the World Bank or the governments they represent.
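# Appendix: Loading the Training Data As referenced in the # Data section above, here is a minimal sketch of loading the compiled dataset at the pinned revision and selecting only the MTEB classification triplets via the `task_type` key. The `train` split name is an assumption, not something stated in this card.

```python
from datasets import load_dataset

# Load the compiled MEDI + MTEB classification dataset at the exact
# revision used to train the model (split name assumed to be "train").
data = load_dataset(
    "avsolatorio/medi-data-mteb_avs_triplets",
    revision="238a0499b6e6b690cc64ea56fde8461daa8341bb",
    split="train",
)

# Keep only the mined MTEB classification triplets, which are prefixed
# with "mteb_" in the `task_type` key.
mteb_only = data.filter(lambda ex: ex["task_type"].startswith("mteb_"))
print(len(mteb_only))
```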
MMG/xlm-roberta-large-ner-spanish
MMG
"2023-06-05T08:18:20Z"
106,448
23
transformers
[ "transformers", "pytorch", "safetensors", "xlm-roberta", "token-classification", "es", "dataset:CoNLL-2002", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:04Z"
--- language: - es datasets: - CoNLL-2002 widget: - text: "Las oficinas de MMG están en Las Rozas." --- # xlm-roberta-large-ner-spanish This model is an XLM-RoBERTa-large model fine-tuned for Named Entity Recognition (NER) on the Spanish portion of the CoNLL-2002 dataset. Evaluated on the test subset of this dataset, it obtains an F1-score of 89.17, making it one of the best NER models for Spanish available at the moment.
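A minimal usage sketch with the Hugging Face Transformers `pipeline` API; the `aggregation_strategy` setting is an illustrative choice, not part of the original card:

```python
from transformers import pipeline

# Token-classification pipeline; "simple" aggregation merges word pieces
# into whole entities (illustrative choice).
ner = pipeline(
    "token-classification",
    model="MMG/xlm-roberta-large-ner-spanish",
    aggregation_strategy="simple",
)

print(ner("Las oficinas de MMG están en Las Rozas."))
# CoNLL-2002 entity types: PER, ORG, LOC, MISC
```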
facebook/dinov2-small
facebook
"2023-09-06T11:24:10Z"
105,903
7
transformers
[ "transformers", "pytorch", "safetensors", "dinov2", "image-feature-extraction", "dino", "vision", "arxiv:2304.07193", "license:apache-2.0", "endpoints_compatible", "region:us" ]
image-feature-extraction
"2023-07-31T16:53:09Z"
--- license: apache-2.0 tags: - dino - vision --- # Vision Transformer (small-sized model) trained using DINOv2 Vision Transformer (ViT) model trained using the DINOv2 method. It was introduced in the paper [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. and first released in [this repository](https://github.com/facebookresearch/dinov2). Disclaimer: The team releasing DINOv2 did not write a model card for this model, so this model card has been written by the Hugging Face team. ## Model description The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion. Images are presented to the model as a sequence of fixed-size patches, which are linearly embedded. One also adds a [CLS] token to the beginning of a sequence to use it for classification tasks. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder. Note that this model does not include any fine-tuned heads. By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of an entire image. ## Intended uses & limitations You can use the raw model for feature extraction. See the [model hub](https://huggingface.co/models?search=facebook/dinov2) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model: ```python from transformers import AutoImageProcessor, AutoModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained('facebook/dinov2-small') model = AutoModel.from_pretrained('facebook/dinov2-small') inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` ### BibTeX entry and citation info ```bibtex @misc{oquab2023dinov2, title={DINOv2: Learning Robust Visual Features without Supervision}, author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski}, year={2023}, eprint={2304.07193}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
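Building on the snippet above, here is a minimal sketch of pooling the [CLS] token as an image-level feature and placing a linear classifier on top; the head and class count are hypothetical illustrations, not part of the released model:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-small')
model = AutoModel.from_pretrained('facebook/dinov2-small')

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# The [CLS] token is the first position of the last hidden state and can
# be seen as a representation of the entire image.
cls_embedding = outputs.last_hidden_state[:, 0]  # (batch, hidden_size)

# Hypothetical linear head for a downstream classification task.
num_classes = 10  # illustrative
head = torch.nn.Linear(cls_embedding.shape[-1], num_classes)
logits = head(cls_embedding)
print(logits.shape)  # torch.Size([1, 10])
```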
Helsinki-NLP/opus-mt-en-de
Helsinki-NLP
"2023-08-16T11:29:21Z"
105,127
21
transformers
[ "transformers", "pytorch", "tf", "jax", "rust", "marian", "text2text-generation", "translation", "en", "de", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
translation
"2022-03-02T23:29:04Z"
--- tags: - translation license: cc-by-4.0 --- ### opus-mt-en-de ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Citation Information](#citation-information) - [How to Get Started With the Model](#how-to-get-started-with-the-model) ## Model Details **Model Description:** - **Developed by:** Language Technology Research Group at the University of Helsinki - **Model Type:** Translation - **Language(s):** - Source Language: English - Target Language: German - **License:** CC-BY-4.0 - **Resources for more information:** - [GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train) ## Uses #### Direct Use This model can be used for translation and text-to-text generation. ## Risks, Limitations and Biases **CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.** Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Further details about the dataset for this model can be found in the OPUS readme: [en-de](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-de/README.md) #### Training Data ##### Preprocessing * pre-processing: normalization + SentencePiece * dataset: [opus](https://github.com/Helsinki-NLP/Opus-MT) * download original weights: [opus-2020-02-26.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-de/opus-2020-02-26.zip) * test set translations: [opus-2020-02-26.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-de/opus-2020-02-26.test.txt) ## Evaluation #### Results * test set scores: [opus-2020-02-26.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-de/opus-2020-02-26.eval.txt) #### Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | newssyscomb2009.en.de | 23.5 | 0.540 | | news-test2008.en.de | 23.5 | 0.529 | | newstest2009.en.de | 22.3 | 0.530 | | newstest2010.en.de | 24.9 | 0.544 | | newstest2011.en.de | 22.5 | 0.524 | | newstest2012.en.de | 23.0 | 0.525 | | newstest2013.en.de | 26.9 | 0.553 | | newstest2015-ende.en.de | 31.1 | 0.594 | | newstest2016-ende.en.de | 37.0 | 0.636 | | newstest2017-ende.en.de | 29.9 | 0.586 | | newstest2018-ende.en.de | 45.2 | 0.690 | | newstest2019-ende.en.de | 40.9 | 0.654 | | Tatoeba.en.de | 47.3 | 0.664 | ## Citation Information ```bibtex @InProceedings{TiedemannThottingal:EAMT2020, author = {J{\"o}rg Tiedemann and Santhosh Thottingal}, title = {{OPUS-MT} — {B}uilding open translation services for the {W}orld}, booktitle = {Proceedings of the 22nd Annual Conference of the European Association for Machine Translation (EAMT)}, year = {2020}, address = {Lisbon, Portugal} } ``` ## How to Get Started With the Model ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") ```
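Continuing from the snippet above, a minimal translation sketch (default generation settings; the input sentence is an arbitrary example):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")

# Translate a single English sentence into German.
inputs = tokenizer("Machine translation is fun.", return_tensors="pt")
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```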
EleutherAI/pythia-160m-deduped
EleutherAI
"2023-07-09T16:04:57Z"
105,000
2
transformers
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "causal-lm", "pythia", "en", "dataset:EleutherAI/the_pile_deduplicated", "arxiv:2304.01373", "arxiv:2101.00027", "arxiv:2201.07311", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-02-08T21:50:19Z"
--- language: - en tags: - pytorch - causal-lm - pythia license: apache-2.0 datasets: - EleutherAI/the_pile_deduplicated --- The *Pythia Scaling Suite* is a collection of models developed to facilitate interpretability research [(see paper)](https://arxiv.org/pdf/2304.01373.pdf). It contains two sets of eight models of sizes 70M, 160M, 410M, 1B, 1.4B, 2.8B, 6.9B, and 12B. For each size, there are two models: one trained on the Pile, and one trained on the Pile after the dataset has been globally deduplicated. All 8 model sizes are trained on the exact same data, in the exact same order. We also provide 154 intermediate checkpoints per model, hosted on Hugging Face as branches. The Pythia model suite was designed to promote scientific research on large language models, especially interpretability research. Despite not centering downstream performance as a design goal, we find the models <a href="#evaluations">match or exceed</a> the performance of similar and same-sized models, such as those in the OPT and GPT-Neo suites. <details> <summary style="font-weight:600">Details on previous early release and naming convention.</summary> Previously, we released an early version of the Pythia suite to the public. However, we decided to retrain the model suite to address a few hyperparameter discrepancies. This model card <a href="#changelog">lists the changes</a>; see appendix B in the Pythia paper for further discussion. We found no difference in benchmark performance between the two Pythia versions. The old models are [still available](https://huggingface.co/models?other=pythia_v0), but we suggest the retrained suite if you are just starting to use Pythia.<br> **This is the current release.** Please note that all models in the *Pythia* suite were renamed in January 2023. For clarity, a <a href="#naming-convention-and-parameter-count">table comparing the old and new names</a> is provided in this model card, together with exact parameter counts. </details> <br> # Pythia-160M-deduped ## Model Details - Developed by: [EleutherAI](http://eleuther.ai) - Model type: Transformer-based Language Model - Language: English - Learn more: [Pythia's GitHub repository](https://github.com/EleutherAI/pythia) for training procedure, config files, and details on how to use. [See paper](https://arxiv.org/pdf/2304.01373.pdf) for more evals and implementation details. - Library: [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) - License: Apache 2.0 - Contact: to ask questions about this model, join the [EleutherAI Discord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`. Please read the existing *Pythia* documentation before asking about it in the EleutherAI Discord. For general correspondence: [contact@eleuther.ai](mailto:contact@eleuther.ai).
<figure> | Pythia model | Non-Embedding Params | Layers | Model Dim | Heads | Batch Size | Learning Rate | Equivalent Models | | -----------: | -------------------: | :----: | :-------: | :---: | :--------: | :-------------------: | :--------------------: | | 70M | 18,915,328 | 6 | 512 | 8 | 2M | 1.0 x 10<sup>-3</sup> | — | | 160M | 85,056,000 | 12 | 768 | 12 | 2M | 6.0 x 10<sup>-4</sup> | GPT-Neo 125M, OPT-125M | | 410M | 302,311,424 | 24 | 1024 | 16 | 2M | 3.0 x 10<sup>-4</sup> | OPT-350M | | 1.0B | 805,736,448 | 16 | 2048 | 8 | 2M | 3.0 x 10<sup>-4</sup> | — | | 1.4B | 1,208,602,624 | 24 | 2048 | 16 | 2M | 2.0 x 10<sup>-4</sup> | GPT-Neo 1.3B, OPT-1.3B | | 2.8B | 2,517,652,480 | 32 | 2560 | 32 | 2M | 1.6 x 10<sup>-4</sup> | GPT-Neo 2.7B, OPT-2.7B | | 6.9B | 6,444,163,072 | 32 | 4096 | 32 | 2M | 1.2 x 10<sup>-4</sup> | OPT-6.7B | | 12B | 11,327,027,200 | 36 | 5120 | 40 | 2M | 1.2 x 10<sup>-4</sup> | — | <figcaption>Engineering details for the <i>Pythia Suite</i>. Deduped and non-deduped models of a given size have the same hyperparameters. “Equivalent” models have <b>exactly</b> the same architecture, and the same number of non-embedding parameters.</figcaption> </figure> ## Uses and Limitations ### Intended Use The primary intended use of Pythia is research on the behavior, functionality, and limitations of large language models. This suite is intended to provide a controlled setting for performing scientific experiments. We also provide 154 checkpoints per model: initial `step0`, 10 log-spaced checkpoints `step{1,2,4...512}`, and 143 evenly-spaced checkpoints from `step1000` to `step143000`. These checkpoints are hosted on Hugging Face as branches. Note that branch `143000` corresponds exactly to the model checkpoint on the `main` branch of each model. You may also further fine-tune and adapt Pythia-160M-deduped for deployment, as long as your use is in accordance with the Apache 2.0 license. Pythia models work with the Hugging Face [Transformers Library](https://huggingface.co/docs/transformers/index). If you decide to use pre-trained Pythia-160M-deduped as a basis for your fine-tuned model, please conduct your own risk and bias assessment. ### Out-of-scope use The Pythia Suite is **not** intended for deployment. It is not in itself a product and cannot be used for human-facing interactions. For example, the model may generate harmful or offensive text. Please evaluate the risks associated with your particular use case. Pythia models are English-language only, and are not suitable for translation or generating text in other languages. Pythia-160M-deduped has not been fine-tuned for downstream contexts in which language models are commonly deployed, such as writing genre prose, or commercial chatbots. This means Pythia-160M-deduped will **not** respond to a given prompt the way a product like ChatGPT does. This is because, unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human Feedback (RLHF) to better “follow” human instructions. ### Limitations and biases The core functionality of a large language model is to take a string of text and predict the next token. The token deemed statistically most likely by the model need not produce the most “accurate” text. Never rely on Pythia-160M-deduped to produce factually accurate output. This model was trained on [the Pile](https://pile.eleuther.ai/), a dataset known to contain profanity and texts that are lewd or otherwise offensive.
See [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a discussion of documented biases with regard to gender, religion, and race. Pythia-160M-deduped may produce socially unacceptable or undesirable text, *even if* the prompt itself does not include anything explicitly offensive. If you plan on using text generated through, for example, the Hosted Inference API, we recommend having a human curate the outputs of this language model before presenting it to other people. Please inform your audience that the text was generated by Pythia-160M-deduped. ### Quickstart Pythia models can be loaded and used via the following code, demonstrated here for the third `pythia-70m-deduped` checkpoint: ```python from transformers import GPTNeoXForCausalLM, AutoTokenizer model = GPTNeoXForCausalLM.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) inputs = tokenizer("Hello, I am", return_tensors="pt") tokens = model.generate(**inputs) tokenizer.decode(tokens[0]) ``` Revision/branch `step143000` corresponds exactly to the model checkpoint on the `main` branch of each model.<br> For more information on how to use all Pythia models, see [documentation on GitHub](https://github.com/EleutherAI/pythia). ## Training ### Training data Pythia-160M-deduped was trained on the Pile **after the dataset has been globally deduplicated**.<br> [The Pile](https://pile.eleuther.ai/) is an 825GiB general-purpose dataset in English. It was created by EleutherAI specifically for training large language models. It contains texts from 22 diverse sources, roughly broken down into five categories: academic writing (e.g. arXiv), internet (e.g. CommonCrawl), prose (e.g. Project Gutenberg), dialogue (e.g. YouTube subtitles), and miscellaneous (e.g. GitHub, Enron Emails). See [the Pile paper](https://arxiv.org/abs/2101.00027) for a breakdown of all data sources, methodology, and a discussion of ethical implications. Consult [the datasheet](https://arxiv.org/abs/2201.07311) for more detailed documentation about the Pile and its component datasets. The Pile can be downloaded from the [official website](https://pile.eleuther.ai/), or from a [community mirror](https://the-eye.eu/public/AI/pile/). ### Training procedure All models were trained on the exact same data, in the exact same order. Each model saw 299,892,736,000 tokens during training, and 143 checkpoints for each model are saved every 2,097,152,000 tokens, spaced evenly throughout training, from `step1000` to `step143000` (which is the same as `main`). In addition, we also provide frequent early checkpoints: `step0` and `step{1,2,4...512}`. This corresponds to training for just under 1 epoch on the Pile for non-deduplicated models, and about 1.5 epochs on the deduplicated Pile. All *Pythia* models trained for 143000 steps at a batch size of 2M (2,097,152 tokens).<br> See [GitHub](https://github.com/EleutherAI/pythia) for more details on training procedure, including [how to reproduce it](https://github.com/EleutherAI/pythia/blob/main/README.md#reproducing-training).<br> Pythia uses the same tokenizer as [GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b). ## Evaluations All 16 *Pythia* models were evaluated using the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness).
You can access the results by model and step at `results/json/*` in the [GitHub repository](https://github.com/EleutherAI/pythia/tree/main/results/json/).<br> Expand the sections below to see plots of evaluation results for all Pythia and Pythia-deduped models compared with OPT and BLOOM. <details> <summary>LAMBADA – OpenAI</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/lambada_openai_v1.png" style="width:auto"/> </details> <details> <summary>Physical Interaction: Question Answering (PIQA)</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/piqa_v1.png" style="width:auto"/> </details> <details> <summary>WinoGrande</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/winogrande_v1.png" style="width:auto"/> </details> <details> <summary>AI2 Reasoning Challenge—Easy Set</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/arc_easy_v1.png" style="width:auto"/> </details> <details> <summary>SciQ</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/sciq_v1.png" style="width:auto"/> </details> ## Changelog This section compares differences between previously released [Pythia v0](https://huggingface.co/models?other=pythia_v0) and the current models. See Appendix B of the Pythia paper for further discussion of these changes and the motivation behind them. We found that retraining Pythia had no impact on benchmark performance. - All model sizes are now trained with uniform batch size of 2M tokens. Previously, the models of size 160M, 410M, and 1.4B parameters were trained with batch sizes of 4M tokens. - We added checkpoints at initialization (step 0) and steps {1,2,4,8,16,32,64,128,256,512} in addition to every 1000 training steps. - Flash Attention was used in the new retrained suite. - We remedied a minor inconsistency that existed in the original suite: all models of size 2.8B parameters or smaller had a learning rate (LR) schedule which decayed to a minimum LR of 10% of the starting LR, but the 6.9B and 12B models all used an LR schedule which decayed to a minimum LR of 0. In the redone training runs, we rectified this inconsistency: all models were now trained with LR decaying to a minimum of 0.1× their maximum LR. ### Naming convention and parameter count *Pythia* models were renamed in January 2023. It is possible that the old naming convention still persists in some documentation by accident. The current naming convention (70M, 160M, etc.) is based on total parameter count. <figure style="width:32em"> | current Pythia suffix | old suffix | total params | non-embedding params | | --------------------: | ---------: | -------------: | -------------------: | | 70M | 19M | 70,426,624 | 18,915,328 | | 160M | 125M | 162,322,944 | 85,056,000 | | 410M | 350M | 405,334,016 | 302,311,424 | | 1B | 800M | 1,011,781,632 | 805,736,448 | | 1.4B | 1.3B | 1,414,647,808 | 1,208,602,624 | | 2.8B | 2.7B | 2,775,208,960 | 2,517,652,480 | | 6.9B | 6.7B | 6,857,302,016 | 6,444,163,072 | | 12B | 13B | 11,846,072,320 | 11,327,027,200 | </figure>
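As a supplement to the training-procedure description above, the checkpoint arithmetic is easy to verify: with a batch size of 2,097,152 tokens per step, the tokens seen at any checkpoint branch follow directly. A small sketch (pure Python, no dependencies):

```python
BATCH_SIZE_TOKENS = 2_097_152  # 2M tokens per training step

def tokens_seen(step: int) -> int:
    """Total training tokens consumed at a given checkpoint step."""
    return step * BATCH_SIZE_TOKENS

# A few of the hosted checkpoint branches, named `step{N}`.
for step in (1, 512, 1000, 143000):
    print(f"step{step}: {tokens_seen(step):,} tokens")

# step143000 -> 299,892,736,000 tokens, matching the total reported above.
```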
Qwen/Qwen1.5-32B
Qwen
"2024-04-05T11:30:24Z"
104,467
68
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "pretrained", "conversational", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2024-04-01T06:19:21Z"
---
license: other
license_name: tongyi-qianwen-research
license_link: >-
  https://huggingface.co/Qwen/Qwen1.5-32B/blob/main/LICENSE
language:
- en
pipeline_tag: text-generation
tags:
- pretrained
---

# Qwen1.5-32B

## Introduction

Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previously released Qwen, the improvements include:

* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;
* Significant performance improvement in Chat models;
* Multilingual support of both base and chat models;
* Stable support of 32K context length for models of all sizes;
* No need for `trust_remote_code`.

For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5).

## Model Details

Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, a mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and code. For the beta version, we have temporarily not included GQA (except for 32B) or the mixture of SWA and full attention.

## Requirements

The code for Qwen1.5 is included in the latest Hugging Face `transformers`, and we advise you to install `transformers>=4.37.0`, or you might encounter the following error:
```
KeyError: 'qwen2'.
```

## Usage

We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. A minimal loading sketch is included at the end of this card.

## Citation

If you find our work helpful, feel free to cite it.

```
@article{qwen,
  title={Qwen Technical Report},
  author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
  journal={arXiv preprint arXiv:2309.16609},
  year={2023}
}
```
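To make the Requirements note concrete, here is a minimal, hedged loading sketch using standard `transformers` APIs; it assumes enough GPU memory for a 32B model, and per the Usage note the base model is best used as a starting point for post-training rather than for direct generation:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Requires transformers>=4.37.0; older versions raise KeyError: 'qwen2'.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-32B")
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen1.5-32B",
    torch_dtype="auto",
    device_map="auto",  # shard weights across available GPUs
)

inputs = tokenizer("Qwen1.5 is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```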
amazon/chronos-t5-small
amazon
"2024-04-08T11:52:22Z"
103,645
8
transformers
[ "transformers", "safetensors", "t5", "text2text-generation", "time series", "forecasting", "pretrained models", "foundation models", "time series foundation models", "time-series", "other", "arxiv:2403.07815", "arxiv:1910.10683", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
other
"2024-02-21T10:06:21Z"
--- license: apache-2.0 pipeline_tag: other tags: - time series - forecasting - pretrained models - foundation models - time series foundation models - time-series --- # Chronos-T5 (Small) Chronos is a family of **pretrained time series forecasting models** based on language model architectures. A time series is transformed into a sequence of tokens via scaling and quantization, and a language model is trained on these tokens using the cross-entropy loss. Once trained, probabilistic forecasts are obtained by sampling multiple future trajectories given the historical context. Chronos models have been trained on a large corpus of publicly available time series data, as well as synthetic data generated using Gaussian processes. For details on Chronos models, training data and procedures, and experimental results, please refer to the paper [Chronos: Learning the Language of Time Series](https://arxiv.org/abs/2403.07815). <p align="center"> <img src="figures/main-figure.png" width="100%"> <br /> <span> Fig. 1: High-level depiction of Chronos. (<b>Left</b>) The input time series is scaled and quantized to obtain a sequence of tokens. (<b>Center</b>) The tokens are fed into a language model which may either be an encoder-decoder or a decoder-only model. The model is trained using the cross-entropy loss. (<b>Right</b>) During inference, we autoregressively sample tokens from the model and map them back to numerical values. Multiple trajectories are sampled to obtain a predictive distribution. </span> </p> --- ## Architecture The models in this repository are based on the [T5 architecture](https://arxiv.org/abs/1910.10683). The only difference is in the vocabulary size: Chronos-T5 models use 4096 different tokens, compared to 32128 of the original T5 models, resulting in fewer parameters. 
| Model | Parameters | Based on |
| ---------------------------------------------------------------------- | ---------- | ---------------------------------------------------------------------- |
| [**chronos-t5-tiny**](https://huggingface.co/amazon/chronos-t5-tiny) | 8M | [t5-efficient-tiny](https://huggingface.co/google/t5-efficient-tiny) |
| [**chronos-t5-mini**](https://huggingface.co/amazon/chronos-t5-mini) | 20M | [t5-efficient-mini](https://huggingface.co/google/t5-efficient-mini) |
| [**chronos-t5-small**](https://huggingface.co/amazon/chronos-t5-small) | 46M | [t5-efficient-small](https://huggingface.co/google/t5-efficient-small) |
| [**chronos-t5-base**](https://huggingface.co/amazon/chronos-t5-base) | 200M | [t5-efficient-base](https://huggingface.co/google/t5-efficient-base) |
| [**chronos-t5-large**](https://huggingface.co/amazon/chronos-t5-large) | 710M | [t5-efficient-large](https://huggingface.co/google/t5-efficient-large) |

## Usage

To perform inference with Chronos models, install the package in the GitHub [companion repo](https://github.com/amazon-science/chronos-forecasting) by running:

```
pip install git+https://github.com/amazon-science/chronos-forecasting.git
```

A minimal example showing how to perform inference using Chronos models:

```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from chronos import ChronosPipeline

pipeline = ChronosPipeline.from_pretrained(
    "amazon/chronos-t5-small",
    device_map="cuda",
    torch_dtype=torch.bfloat16,
)

df = pd.read_csv("https://raw.githubusercontent.com/AileenNielsen/TimeSeriesAnalysisWithPython/master/data/AirPassengers.csv")

# context must be either a 1D tensor, a list of 1D tensors,
# or a left-padded 2D tensor with batch as the first dimension
context = torch.tensor(df["#Passengers"])
prediction_length = 12
forecast = pipeline.predict(context, prediction_length)  # shape [num_series, num_samples, prediction_length]

# visualize the forecast
forecast_index = range(len(df), len(df) + prediction_length)
low, median, high = np.quantile(forecast[0].numpy(), [0.1, 0.5, 0.9], axis=0)

plt.figure(figsize=(8, 4))
plt.plot(df["#Passengers"], color="royalblue", label="historical data")
plt.plot(forecast_index, median, color="tomato", label="median forecast")
plt.fill_between(forecast_index, low, high, color="tomato", alpha=0.3, label="80% prediction interval")
plt.legend()
plt.grid()
plt.show()
```

## Citation

If you find Chronos models useful for your research, please consider citing the associated [paper](https://arxiv.org/abs/2403.07815):

```
@article{ansari2024chronos,
  author  = {Ansari, Abdul Fatir and Stella, Lorenzo and Turkmen, Caner and Zhang, Xiyuan and Mercado, Pedro and Shen, Huibin and Shchur, Oleksandr and Rangapuram, Syama Sundar and Pineda Arango, Sebastian and Kapoor, Shubham and Zschiegner, Jasper and Maddix, Danielle C. and Mahoney, Michael W. and Torkkola, Kari and Gordon Wilson, Andrew and Bohlke-Schneider, Michael and Wang, Yuyang},
  title   = {Chronos: Learning the Language of Time Series},
  journal = {arXiv preprint arXiv:2403.07815},
  year    = {2024}
}
```

## Security

See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.

## License

This project is licensed under the Apache-2.0 License.
vikp/surya_layout
vikp
"2024-03-26T20:28:15Z"
102,855
2
transformers
[ "transformers", "safetensors", "segformer", "license:cc-by-nc-sa-4.0", "endpoints_compatible", "region:us" ]
null
"2024-02-29T20:56:42Z"
--- license: cc-by-nc-sa-4.0 --- Layout model for [surya](https://github.com/VikParuchuri/surya).
tohoku-nlp/bert-base-japanese-char
tohoku-nlp
"2024-02-22T00:57:58Z"
102,747
7
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "fill-mask", "ja", "dataset:wikipedia", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: ja
license: cc-by-sa-4.0
datasets:
- wikipedia
widget:
- text: 仙台は「[MASK]の都」と呼ばれている。
---

# BERT base Japanese (character tokenization)

This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.

This version of the model processes input texts with word-level tokenization based on the IPA dictionary, followed by character-level tokenization.

The code for pretraining is available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/tree/v1.0).

## Model architecture

The model architecture is the same as the original BERT base model: 12 layers, 768 dimensions of hidden states, and 12 attention heads.

## Training Data

The model is trained on Japanese Wikipedia as of September 1, 2019. To generate the training corpus, [WikiExtractor](https://github.com/attardi/wikiextractor) is used to extract plain texts from a dump file of Wikipedia articles. The text files used for the training are 2.6GB in size, consisting of approximately 17M sentences.

## Tokenization

The texts are first tokenized by the [MeCab](https://taku910.github.io/mecab/) morphological parser with the IPA dictionary and then split into characters. The vocabulary size is 4000.

## Training

The model is trained with the same configuration as the original BERT: 512 tokens per instance, 256 instances per batch, and 1M training steps.

## Licenses

The pretrained models are distributed under the terms of the [Creative Commons Attribution-ShareAlike 3.0](https://creativecommons.org/licenses/by-sa/3.0/).

## Acknowledgments

For training models, we used Cloud TPUs provided by the [TensorFlow Research Cloud](https://www.tensorflow.org/tfrc/) program.
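A minimal usage sketch for masked-character prediction, assuming the MeCab tokenizer dependencies (`fugashi`, `ipadic`) are installed alongside `transformers`; the prompt reuses the widget example from the card metadata:

```python
# pip install transformers fugashi ipadic
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="tohoku-nlp/bert-base-japanese-char")

# The vocabulary is character-level, so [MASK] stands for a single character.
for prediction in fill_mask("仙台は「[MASK]の都」と呼ばれている。"):
    print(prediction["token_str"], round(prediction["score"], 4))
```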
vikp/column_detector
vikp
"2023-12-22T05:55:14Z"
102,701
9
transformers
[ "transformers", "pytorch", "layoutlmv3", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2023-11-22T05:53:47Z"
Detects the number of columns in PDF page images. Based on LayoutLMv3. Used in [marker](https://github.com/VikParuchuri/marker).
stabilityai/stable-video-diffusion-img2vid-xt
stabilityai
"2024-04-12T08:44:58Z"
102,612
2,162
diffusers
[ "diffusers", "safetensors", "image-to-video", "license:other", "has_space", "diffusers:StableVideoDiffusionPipeline", "region:us" ]
image-to-video
"2023-11-20T23:45:55Z"
---
pipeline_tag: image-to-video
license: other
license_name: stable-video-diffusion-nc-community
license_link: LICENSE
---

# Stable Video Diffusion Image-to-Video Model Card

![row01](output_tile.gif)

Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.

Please note: For commercial use, please refer to https://stability.ai/membership.

## Model Details

### Model Description

(SVD) Image-to-Video is a latent diffusion model trained to generate short video clips from image conditioning. This model was trained to generate 25 frames at resolution 576x1024 given a context frame of the same size, finetuned from [SVD Image-to-Video [14 frames]](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid). We also finetune the widely used [f8-decoder](https://huggingface.co/docs/diffusers/api/models/autoencoderkl#loading-from-the-original-format) for temporal consistency. For convenience, we additionally provide the model with the standard frame-wise decoder [here](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/svd_xt_image_decoder.safetensors).

- **Developed by:** Stability AI
- **Funded by:** Stability AI
- **Model type:** Generative image-to-video model
- **Finetuned from model:** SVD Image-to-Video [14 frames]

### Model Sources

For research purposes, we recommend our `generative-models` GitHub repository (https://github.com/Stability-AI/generative-models), which implements the most popular diffusion frameworks (both training and inference).

- **Repository:** https://github.com/Stability-AI/generative-models
- **Paper:** https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets

## Evaluation

![comparison](comparison.png)

The chart above evaluates user preference for SVD-Image-to-Video over [GEN-2](https://research.runwayml.com/gen2) and [PikaLabs](https://www.pika.art/). SVD-Image-to-Video is preferred by human voters in terms of video quality. For details on the user study, we refer to the [research paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets).

## Uses

### Direct Use

The model is intended for both non-commercial and commercial usage. You can use this model for non-commercial or research purposes under this [license](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE). Possible research areas and tasks include

- Research on generative models.
- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.

For commercial use, please refer to https://stability.ai/membership.

Excluded uses are described below.

### Out-of-Scope Use

The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. The model should not be used in any way that violates Stability AI's [Acceptable Use Policy](https://stability.ai/use-policy).

## Limitations and Bias

### Limitations

- The generated videos are rather short (<= 4sec), and the model does not achieve perfect photorealism.
- The model may generate videos without motion, or very slow camera pans.
- The model cannot be controlled through text.
- The model cannot render legible text.
- Faces and people in general may not be generated properly.
- The autoencoding part of the model is lossy.

### Recommendations

The model is intended for both non-commercial and commercial usage.

## How to Get Started with the Model

Check out https://github.com/Stability-AI/generative-models. A hedged `diffusers` sketch is also included at the end of this card.

# Appendix:

All considered potential data sources were included in the final training run; none were held out, since the data filtering methods described in the SVD paper handle quality control and filtering of the dataset. With regard to safety/NSFW filtering, sources considered were either deemed safe or filtered with the in-house NSFW filters.

No explicit human labor is involved in training data preparation. However, human evaluation for model outputs and quality was extensively used to evaluate model quality and performance. The evaluations were performed with third-party contractor platforms (Amazon Sagemaker, Amazon Mechanical Turk, Prolific) with fluent English-speaking contractors from various countries, primarily from the USA, UK, and Canada. Each worker was paid $12/hr for the time invested in the evaluation.

No other third party was involved in the development of this model; the model was fully developed in-house at Stability AI.

Training the SVD checkpoints required a total of approximately 200,000 A100 80GB hours. The majority of the training occurred on 48 * 8 A100s, while some stages took more/less than that. The resulting CO2 emission is ~19,000kg CO2 eq., and energy consumed is ~64000 kWh.

The released checkpoints (SVD/SVD-XT) are image-to-video models that generate short videos/animations closely following the given input image. Since the model relies on an existing supplied image, the risk of disclosing specific material or novel unsafe content is minimal. This was also evaluated by third-party independent red-teaming services, which agree with our conclusion to a high degree of confidence (>90% in various areas of safety red-teaming). The external evaluations were also performed for trustworthiness, leading to >95% confidence in real, trustworthy videos.

With the default settings at the time of release, SVD takes ~100s for generation, and SVD-XT takes ~180s on an A100 80GB card. Several optimizations to trade off quality / memory / speed can be done to perform faster inference or inference on lower VRAM cards.

The information related to the model and its development process and usage protocols can be found in the GitHub repo, associated research paper, and HuggingFace model page/cards.

The released model inference & demo code has image-level watermarking enabled by default, which can be used to detect the outputs. This is done via the imWatermark Python library.

The model can be used to generate videos from static initial images. However, we prohibit unlawful, obscene, or misleading uses of the model consistent with the terms of our license. For the open-weights release, our training data filtering mitigations alleviate this to some extent. These restrictions are explicitly enforced on user-facing interfaces at stablevideo.com, where a warning is issued. We do not take any responsibility for third-party interfaces.

Submitting initial images that bypass input filters to tease out offensive or inappropriate content listed above is also prohibited.
Safety filtering checks at stablevideo.com run on model inputs and outputs independently. More details on our user-facing interfaces can be found here: https://www.stablevideo.com/faq For stablevideo.com, we store preference data in the form of upvotes/downvotes on user-generated videos, and we have a pairwise ranker that runs while a user generates videos. This usage data is solely used for improving Stability AI’s future image/video models and services. No other third-party entities are given access to the usage data beyond Stability AI and maintainers of stablevideo.com. For usage statistics of SVD, we refer interested users to HuggingFace model download/usage statistics as a primary indicator. Third-party applications also have reported model usage statistics. We might also consider releasing aggregate usage statistics of stablevideo.com on reaching some milestones.
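As referenced in the How to Get Started section above, the `generative-models` repository is the reference implementation. For a lighter-weight alternative, here is a minimal, hedged sketch using the `diffusers` `StableVideoDiffusionPipeline`; it assumes a CUDA GPU with sufficient VRAM, and the image path is a placeholder:

```python
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")

# Conditioning frame; SVD-XT was finetuned at 576x1024.
image = load_image("conditioning_frame.png").resize((1024, 576))

frames = pipe(image, decode_chunk_size=8).frames[0]  # lower decode_chunk_size to save VRAM
export_to_video(frames, "generated.mp4", fps=7)
```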
HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
HuggingFaceM4
"2024-03-07T22:05:47Z"
102,603
15
transformers
[ "transformers", "safetensors", "siglip", "zero-shot-image-classification", "custom_code", "arxiv:2307.06304", "license:apache-2.0", "endpoints_compatible", "region:us" ]
zero-shot-image-classification
"2024-01-30T19:31:08Z"
---
license: apache-2.0
---

Same as https://huggingface.co/HuggingFaceM4/siglip-so400m-14-384-flash-attn2 with two changes:
- increase max resolution to 980 x 980 (instead of 384 x 384) by interpolating the position embeddings
- implement the strategy in [NaViT](https://arxiv.org/abs/2307.06304) to allow (a) variable-resolution images and (b) aspect-ratio-preserved images

These changes only apply to the vision tower. No changes to the text tower.

Implementation is fully backward compatible with `https://huggingface.co/HuggingFaceM4/siglip-so400m-14-384-flash-attn2` -> just don't specify the `patch_attention_mask`.

Usage:

```python
import torch
from modeling_siglip import SiglipVisionModel

DEVICE = torch.device("cuda:0")
PATCH_SIZE = 14

pixel_values = torch.randn(2, 3, 28, 42, dtype=torch.bfloat16, device=DEVICE)

# Pixel-level attention masks for the two 28x42 images:
# image 1 is valid in the top 14 rows only; image 2 is valid in the left 28 columns only.
pixel_attention_mask = [
    [[1] * 42] * 14 + [[0] * 42] * 14,
    [[1] * 28 + [0] * 14] * 28,
]
pixel_attention_mask = torch.tensor(pixel_attention_mask, dtype=torch.bool, device=DEVICE)

# Reduce the pixel-level mask to a patch-level mask: a patch attends if any of its pixels are valid.
patches_subgrid = pixel_attention_mask.unfold(
    dimension=1, size=PATCH_SIZE, step=PATCH_SIZE
).unfold(dimension=2, size=PATCH_SIZE, step=PATCH_SIZE)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

model = SiglipVisionModel.from_pretrained("HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit", _flash_attn_2_enabled=True)
model.train()
model.vision_model.to(DEVICE, dtype=torch.bfloat16)

output = model.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
```
timm/coatnet_0_rw_224.sw_in1k
timm
"2023-05-10T23:41:56Z"
102,493
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2201.03545", "license:apache-2.0", "region:us" ]
image-classification
"2023-01-20T21:24:19Z"
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for coatnet_0_rw_224.sw_in1k

A timm-specific CoAtNet image classification model. Trained in `timm` on ImageNet-1k by Ross Wightman.

ImageNet-1k training done on TPUs thanks to support of the [TRC](https://sites.research.google/trc/about/) program.

### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py)

MaxxViT covers a number of related model architectures that share a common structure including:

- CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
- MaxViT - Uniform blocks across all stages, each containing a MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid).
- CoAtNeXt - A timm-specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm).
- MaxxViT - A timm-specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm).
- MaxxViT-V2 - A MaxxViT variation that removes the window block attention, leaving only ConvNeXt blocks and grid attention, with more width to compensate.

Aside from the major variants listed above, there are more subtle changes from model to model. Any model name with the string `rw` is a `timm`-specific config with modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models, so there are variations. All models with the string `tf` exactly match TensorFlow-based models by the original paper authors, with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released.
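To browse these variants programmatically, here is a minimal sketch using `timm`'s model registry (assuming a recent `timm` install; the exact names returned depend on the installed version):

```python
import timm

# List the CoAtNet / MaxViT family variants registered in the installed timm version.
coatnet_like = timm.list_models("coatnet*") + timm.list_models("coatnext*")
maxvit_like = timm.list_models("maxvit*") + timm.list_models("maxxvit*")

for name in coatnet_like + maxvit_like:
    print(name)
```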
## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 27.4
  - GMACs: 4.4
  - Activations (M): 18.7
  - Image size: 224 x 224
- **Papers:**
  - CoAtNet: Marrying Convolution and Attention for All Data Sizes: https://arxiv.org/abs/2106.04803
- **Dataset:** ImageNet-1k

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('coatnet_0_rw_224.sw_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'coatnet_0_rw_224.sw_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 112, 112])
    #  torch.Size([1, 96, 56, 56])
    #  torch.Size([1, 192, 28, 28])
    #  torch.Size([1, 384, 14, 14])
    #  torch.Size([1, 768, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'coatnet_0_rw_224.sw_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 768, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

### By Top-1

|model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)|
|------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:|
|[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22|
|[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76|
|[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) 
|86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
Systran/faster-whisper-tiny
Systran
"2023-11-23T10:42:55Z"
102,050
1
ctranslate2
[ "ctranslate2", "audio", "automatic-speech-recognition", "en", "zh", "de", "es", "ru", "ko", "fr", "ja", "pt", "tr", "pl", "ca", "nl", "ar", "sv", "it", "id", "hi", "fi", "vi", "he", "uk", "el", "ms", "cs", "ro", "da", "hu", "ta", "no", "th", "ur", "hr", "bg", "lt", "la", "mi", "ml", "cy", "sk", "te", "fa", "lv", "bn", "sr", "az", "sl", "kn", "et", "mk", "br", "eu", "is", "hy", "ne", "mn", "bs", "kk", "sq", "sw", "gl", "mr", "pa", "si", "km", "sn", "yo", "so", "af", "oc", "ka", "be", "tg", "sd", "gu", "am", "yi", "lo", "uz", "fo", "ht", "ps", "tk", "nn", "mt", "sa", "lb", "my", "bo", "tl", "mg", "as", "tt", "haw", "ln", "ha", "ba", "jw", "su", "license:mit", "region:us" ]
automatic-speech-recognition
"2023-11-23T09:53:30Z"
--- language: - en - zh - de - es - ru - ko - fr - ja - pt - tr - pl - ca - nl - ar - sv - it - id - hi - fi - vi - he - uk - el - ms - cs - ro - da - hu - ta - 'no' - th - ur - hr - bg - lt - la - mi - ml - cy - sk - te - fa - lv - bn - sr - az - sl - kn - et - mk - br - eu - is - hy - ne - mn - bs - kk - sq - sw - gl - mr - pa - si - km - sn - yo - so - af - oc - ka - be - tg - sd - gu - am - yi - lo - uz - fo - ht - ps - tk - nn - mt - sa - lb - my - bo - tl - mg - as - tt - haw - ln - ha - ba - jw - su tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper tiny model for CTranslate2 This repository contains the conversion of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/systran/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("tiny") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-tiny --output_dir faster-whisper-tiny \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-tiny).**
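To illustrate the `compute_type` note above, the FP16 weights on disk can be converted at load time, e.g. to INT8 for CPU inference:

```python
from faster_whisper import WhisperModel

# FP16 weights on disk, converted to INT8 on load by CTranslate2.
model = WhisperModel("tiny", device="cpu", compute_type="int8")
```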
microsoft/deberta-v3-small
microsoft
"2022-09-26T08:59:13Z"
101,993
42
transformers
[ "transformers", "pytorch", "tf", "deberta-v2", "deberta", "deberta-v3", "fill-mask", "en", "arxiv:2006.03654", "arxiv:2111.09543", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
---
language: en
tags:
- deberta
- deberta-v3
- fill-mask
thumbnail: https://huggingface.co/front/thumbnails/microsoft.png
license: mit
---

## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing

[DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and an enhanced mask decoder. With those two improvements, DeBERTa outperforms RoBERTa on a majority of NLU tasks with 80GB of training data.

In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-Style pre-training with Gradient-Disentangled Embedding Sharing. Compared to DeBERTa, our V3 version significantly improves the model performance on downstream tasks. You can find more technical details about the new model in our [paper](https://arxiv.org/abs/2111.09543).

Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates.

The DeBERTa V3 small model comes with 6 layers and a hidden size of 768. It has **44M** backbone parameters with a vocabulary containing 128K tokens, which introduces 98M parameters in the embedding layer. This model was trained using the same 160GB data as DeBERTa V2.

#### Fine-tuning on NLU tasks

We present the dev results on SQuAD 2.0 and MNLI tasks.

| Model             |Vocabulary(K)|Backbone #Params(M)| SQuAD 2.0(F1/EM) | MNLI-m/mm(ACC)|
|-------------------|----------|-------------------|-----------|----------|
| RoBERTa-base      |50        |86                 | 83.7/80.5 | 87.6/-   |
| XLNet-base        |32        |92                 | -/80.2    | 86.8/-   |
| ELECTRA-base      |30        |86                 | -/80.5    | 88.8/-   |
| DeBERTa-base      |50        |100                | 86.2/83.1 | 88.8/88.5|
| DeBERTa-v3-large  |128       |304                | 91.5/89.0 | 91.8/91.9|
| DeBERTa-v3-base   |128       |86                 | 88.4/85.4 | 90.6/90.7|
| **DeBERTa-v3-small** |128    |**44**             | **82.8/80.4** | **88.3/87.7**|
| DeBERTa-v3-small+SiFT|128    |22                 | -/-       | 88.8/88.5|

#### Fine-tuning with HF transformers

```bash
#!/bin/bash

cd transformers/examples/pytorch/text-classification/

pip install datasets

export TASK_NAME=mnli

output_dir="ds_results"
num_gpus=8
batch_size=8

python -m torch.distributed.launch --nproc_per_node=${num_gpus} \
  run_glue.py \
  --model_name_or_path microsoft/deberta-v3-small \
  --task_name $TASK_NAME \
  --do_train \
  --do_eval \
  --evaluation_strategy steps \
  --max_seq_length 256 \
  --warmup_steps 1500 \
  --per_device_train_batch_size ${batch_size} \
  --learning_rate 4.5e-5 \
  --num_train_epochs 3 \
  --output_dir $output_dir \
  --overwrite_output_dir \
  --logging_steps 1000 \
  --logging_dir $output_dir
```

### Citation

If you find DeBERTa useful for your work, please cite the following papers:

```latex
@misc{he2021debertav3,
      title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing},
      author={Pengcheng He and Jianfeng Gao and Weizhu Chen},
      year={2021},
      eprint={2111.09543},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

```latex
@inproceedings{
he2021deberta,
title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION},
author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen},
booktitle={International Conference on Learning Representations},
year={2021},
url={https://openreview.net/forum?id=XPZIaotutsD}
}
```
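As a programmatic counterpart to the CLI recipe above, here is a minimal, hedged sketch for setting up fine-tuning with `transformers` (it assumes `sentencepiece` is installed for the tokenizer; `num_labels=3` matches MNLI's three classes, and the classification head is randomly initialized until fine-tuned):

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-small")
model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/deberta-v3-small",
    num_labels=3,  # MNLI: entailment / neutral / contradiction
)

inputs = tokenizer(
    "A soccer game with multiple males playing.",
    "Some men are playing a sport.",
    return_tensors="pt",
)
logits = model(**inputs).logits  # meaningful only after fine-tuning the new head
```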
KBLab/sentence-bert-swedish-cased
KBLab
"2023-07-18T09:57:37Z"
101,862
22
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "transformers", "sv", "arxiv:2004.09813", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:04Z"
--- pipeline_tag: sentence-similarity lang: - sv tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers widget: - source_sentence: Mannen åt mat. sentences: - Han förtärde en närande och nyttig måltid. - Det var ett sunkigt hak med ganska gott käk. - Han inmundigade middagen tillsammans med ett glas rödvin. - Potatischips är jättegoda. - Tryck på knappen för att få tala med kundsupporten. example_title: Mat - source_sentence: Kan jag deklarera digitalt från utlandet? sentences: - Du som befinner dig i utlandet kan deklarera digitalt på flera olika sätt. - >- Du som har kvarskatt att betala ska göra en inbetalning till ditt skattekonto. - >- Efter att du har deklarerat går vi igenom uppgifterna i din deklaration och räknar ut din skatt. - >- I din deklaration som du får från oss har vi räknat ut vad du ska betala eller få tillbaka. - Tryck på knappen för att få tala med kundsupporten. example_title: Skatteverket FAQ - source_sentence: Hon kunde göra bakåtvolter. sentences: - Hon var atletisk. - Hon var bra på gymnastik. - Hon var inte atletisk. - Hon var oförmögen att flippa baklänges. example_title: Gymnastik license: apache-2.0 language: - sv --- # KBLab/sentence-bert-swedish-cased This is a [sentence-transformers](https://www.SBERT.net) model: It maps Swedish sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. This model is a bilingual Swedish-English model trained according to instructions in the paper [Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation](https://arxiv.org/pdf/2004.09813.pdf) and the [documentation](https://www.sbert.net/examples/training/multilingual/README.html) accompanying its companion python package. We have used the strongest available pretrained English Bi-Encoder ([all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2)) as a teacher model, and the pretrained Swedish [KB-BERT](https://huggingface.co/KB/bert-base-swedish-cased) as the student model. A more detailed description of the model can be found in an article we published on the KBLab blog [here](https://kb-labb.github.io/posts/2021-08-23-a-swedish-sentence-transformer/) and for the updated model [here](https://kb-labb.github.io/posts/2023-01-16-sentence-transformer-20/). **Update**: We have released updated versions of the model since the initial release. The original model described in the blog post is **v1.0**. The current version is **v2.0**. The newer versions are trained on longer paragraphs, and have a longer max sequence length. **v2.0** is trained with a stronger teacher model and is the current default. 
| Model version | Teacher Model | Max Sequence Length |
|---------------|---------|----------|
| v1.0 | [paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) | 256 |
| v1.1 | [paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) | 384 |
| v2.0 | [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 384 |

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["Det här är en exempelmening", "Varje exempel blir konverterad"]

model = SentenceTransformer('KBLab/sentence-bert-swedish-cased')
embeddings = model.encode(sentences)
print(embeddings)
```

### Loading an older model version (Sentence-Transformers)

Currently, the easiest way to load an older model version is to clone the model repository and load it from disk. For example, to clone the **v1.0** model:

```bash
git clone --depth 1 --branch v1.0 https://huggingface.co/KBLab/sentence-bert-swedish-cased
```

Then you can load the model by pointing to the local folder where you cloned the model:

```python
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("path_to_model_folder/sentence-bert-swedish-cased")
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['Det här är en exempelmening', 'Varje exempel blir konverterad']

# Load model from HuggingFace Hub
# To load an older version, e.g. v1.0, add the argument revision="v1.0"
tokenizer = AutoTokenizer.from_pretrained('KBLab/sentence-bert-swedish-cased')
model = AutoModel.from_pretrained('KBLab/sentence-bert-swedish-cased')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

### Loading an older model (Hugging Face Transformers)

To load an older model, specify the version tag with the `revision` arg.
For example, to load the **v1.0** model, use the following code:

```python
AutoTokenizer.from_pretrained('KBLab/sentence-bert-swedish-cased', revision="v1.0")
AutoModel.from_pretrained('KBLab/sentence-bert-swedish-cased', revision="v1.0")
```

## Evaluation Results

The model was evaluated on [SweParaphrase v1.0](https://spraakbanken.gu.se/en/resources/sweparaphrase) and **SweParaphrase v2.0**. This test set is part of [SuperLim](https://spraakbanken.gu.se/en/resources/superlim) -- a Swedish evaluation suite for natural language understanding tasks. We calculated Pearson and Spearman correlation between predicted model similarity scores and the human similarity score labels. Results from **SweParaphrase v1.0** are displayed below.

| Model version | Pearson | Spearman |
|---------------|---------|----------|
| v1.0          | 0.9183  | 0.9114   |
| v1.1          | 0.9183  | 0.9114   |
| v2.0          | **0.9283** | **0.9130** |

The following code snippet can be used to reproduce the above results:

```python
from sentence_transformers import SentenceTransformer
import pandas as pd

df = pd.read_csv(
    "sweparaphrase-dev-165.csv",
    sep="\t",
    header=None,
    names=[
        "original_id",
        "source",
        "type",
        "sentence_swe1",
        "sentence_swe2",
        "score",
        "sentence1",
        "sentence2",
    ],
)

model = SentenceTransformer("KBLab/sentence-bert-swedish-cased")

sentences1 = df["sentence_swe1"].tolist()
sentences2 = df["sentence_swe2"].tolist()

# Compute embedding for both lists
embeddings1 = model.encode(sentences1, convert_to_tensor=True)
embeddings2 = model.encode(sentences2, convert_to_tensor=True)

# Compute cosine similarity after normalizing
embeddings1 /= embeddings1.norm(dim=-1, keepdim=True)
embeddings2 /= embeddings2.norm(dim=-1, keepdim=True)

cosine_scores = embeddings1 @ embeddings2.t()
sentence_pair_scores = cosine_scores.diag()

df["model_score"] = sentence_pair_scores.cpu().tolist()
print(df[["score", "model_score"]].corr(method="spearman"))
print(df[["score", "model_score"]].corr(method="pearson"))
```

### SweParaphrase v2.0

In general, **v1.1** correlates the most with human assessment of text similarity on SweParaphrase v2.0. Below, we present zero-shot evaluation results on all data splits. They display the model's performance out of the box, without any fine-tuning.

| Model version | Data split | Pearson    | Spearman   |
|---------------|------------|------------|------------|
| v1.0          | train      | 0.8355     | 0.8256     |
| v1.1          | train      | **0.8383** | **0.8302** |
| v2.0          | train      | 0.8209     | 0.8059     |
| v1.0          | dev        | 0.8682     | 0.8774     |
| v1.1          | dev        | **0.8739** | **0.8833** |
| v2.0          | dev        | 0.8638     | 0.8668     |
| v1.0          | test       | 0.8356     | 0.8476     |
| v1.1          | test       | **0.8393** | **0.8550** |
| v2.0          | test       | 0.8232     | 0.8213     |

### SweFAQ v2.0

When it comes to retrieval tasks, **v2.0** performs the best by quite a substantial margin. It is better at matching the correct answer to a question compared to v1.1 and v1.0.
| Model version | Data split | Accuracy   |
|---------------|------------|------------|
| v1.0          | train      | 0.5262     |
| v1.1          | train      | 0.6236     |
| v2.0          | train      | **0.7106** |
| v1.0          | dev        | 0.4636     |
| v1.1          | dev        | 0.5818     |
| v2.0          | dev        | **0.6727** |
| v1.0          | test       | 0.4495     |
| v1.1          | test       | 0.5229     |
| v2.0          | test       | **0.5871** |

Examples of how to evaluate the models on some of the test sets of the SuperLim suite can be found at the following links: [evaluate_faq.py](https://github.com/kb-labb/swedish-sbert/blob/main/evaluate_faq.py) (Swedish FAQ), [evaluate_swesat.py](https://github.com/kb-labb/swedish-sbert/blob/main/evaluate_swesat.py) (SweSAT synonyms), [evaluate_supersim.py](https://github.com/kb-labb/swedish-sbert/blob/main/evaluate_supersim.py) (SuperSim).

## Training

An article with more details on data and v1.0 of the model can be found on the [KBLab blog](https://kb-labb.github.io/posts/2021-08-23-a-swedish-sentence-transformer/).

Around 14.6 million sentences from English-Swedish parallel corpora were used to train the model. Data was sourced from the [Open Parallel Corpus](https://opus.nlpl.eu/) (OPUS) and downloaded via the python package [opustools](https://pypi.org/project/opustools/). Datasets used were: JW300, Europarl, DGT-TM, EMEA, ELITR-ECA, TED2020, Tatoeba and OpenSubtitles.

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 180513 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.MSELoss.MSELoss`

Parameters of the fit()-Method:
```
{
    "epochs": 2,
    "evaluation_steps": 1000,
    "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "eps": 1e-06,
        "lr": 8e-06
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 5000,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

This model was trained by KBLab, a data lab at the National Library of Sweden.

You can cite the article on our blog: https://kb-labb.github.io/posts/2021-08-23-a-swedish-sentence-transformer/.

```
@misc{rekathati2021introducing,
        author = {Rekathati, Faton},
        title = {The KBLab Blog: Introducing a Swedish Sentence Transformer},
        url = {https://kb-labb.github.io/posts/2021-08-23-a-swedish-sentence-transformer/},
        year = {2021}
}
```

## Acknowledgements

We gratefully acknowledge the HPC RIVR consortium ([www.hpc-rivr.si](https://www.hpc-rivr.si/)) and EuroHPC JU ([eurohpc-ju.europa.eu/](https://eurohpc-ju.europa.eu/)) for funding this research by providing computing resources of the HPC system Vega at the Institute of Information Science ([www.izum.si](https://www.izum.si/)).
fxmarty/pix2struct-tiny-random
fxmarty
"2023-06-01T09:47:36Z"
101,321
1
transformers
[ "transformers", "pytorch", "pix2struct", "text2text-generation", "image-to-text", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-to-text
"2023-06-01T09:33:18Z"
--- license: mit pipeline_tag: image-to-text ---
vikp/layout_segmenter
vikp
"2023-12-22T05:54:58Z"
101,123
10
transformers
[ "transformers", "pytorch", "layoutlmv3", "token-classification", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2023-11-23T16:03:49Z"
Segments PDF page layout into blocks. Based on LayoutLMv3. Used in [marker](https://github.com/VikParuchuri/marker).
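The card ships no usage snippet; below is a minimal sketch of generic LayoutLMv3 token-classification inference with `transformers`. It assumes the checkpoint is compatible with the standard LayoutLMv3 processor and that you already have OCR words with bounding boxes normalized to a 0-1000 scale; the file name, words and boxes are illustrative placeholders.

```python
from PIL import Image
from transformers import LayoutLMv3ForTokenClassification, LayoutLMv3Processor

# Assumption: the standard base processor works with this checkpoint.
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
model = LayoutLMv3ForTokenClassification.from_pretrained("vikp/layout_segmenter")

image = Image.open("page.png").convert("RGB")       # a rendered PDF page (placeholder)
words = ["Example", "heading"]                      # placeholder OCR words
boxes = [[50, 40, 300, 80], [50, 100, 500, 140]]    # one 0-1000 box per word

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
logits = model(**encoding).logits

# Map each token's best class id to its label name (includes special tokens).
predictions = logits.argmax(-1).squeeze().tolist()
print([model.config.id2label[p] for p in predictions])
```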
microsoft/llmlingua-2-xlm-roberta-large-meetingbank
microsoft
"2024-04-03T08:40:24Z"
100,781
9
transformers
[ "transformers", "safetensors", "xlm-roberta", "token-classification", "arxiv:2403.12968", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2024-03-17T07:04:34Z"
---
license: mit
---

# LLMLingua-2-XLM-RoBERTa-Large-MeetingBank

This model was introduced in the paper [**LLMLingua-2: Data Distillation for Efficient and Faithful Task-Agnostic Prompt Compression** (Pan et al., 2024)](https://arxiv.org/abs/2403.12968). It is an [XLM-RoBERTa (large-sized model)](https://huggingface.co/FacebookAI/xlm-roberta-large) finetuned to perform token classification for task-agnostic prompt compression. The probability $p_{preserve}$ of each token $x_i$ is used as the metric for compression.

This model is trained on an extractive text compression dataset (to be released publicly) constructed with the methodology proposed in [**LLMLingua-2**](https://arxiv.org/abs/2403.12968), using training examples from [MeetingBank (Hu et al., 2023)](https://meetingbank.github.io/) as the seed data.

For more details, please check the home pages of [LLMLingua-2](https://llmlingua.com/llmlingua2.html) and the [LLMLingua Series](https://llmlingua.com/).

## Usage

```python
from llmlingua import PromptCompressor

compressor = PromptCompressor(
    model_name="microsoft/llmlingua-2-xlm-roberta-large-meetingbank",
    use_llmlingua2=True
)

original_prompt = """John: So, um, I've been thinking about the project, you know, and I believe we need to, uh, make some changes. I mean, we want the project to succeed, right? So, like, I think we should consider maybe revising the timeline.

Sarah: I totally agree, John. I mean, we have to be realistic, you know. The timeline is, like, too tight. You know what I mean? We should definitely extend it.
"""

results = compressor.compress_prompt_llmlingua2(
    original_prompt,
    rate=0.6,
    force_tokens=['\n', '.', '!', '?', ','],
    chunk_end_tokens=['.', '\n'],
    return_word_label=True,
    drop_consecutive=True
)

print(results.keys())
print(f"Compressed prompt: {results['compressed_prompt']}")
print(f"Original tokens: {results['origin_tokens']}")
print(f"Compressed tokens: {results['compressed_tokens']}")
print(f"Compression rate: {results['rate']}")

# get the annotated results over the original prompt
word_sep = "\t\t|\t\t"
label_sep = " "
lines = results["fn_labeled_original_prompt"].split(word_sep)
annotated_results = []
for line in lines:
    word, label = line.split(label_sep)
    annotated_results.append((word, "+" if label == "1" else "-"))  # a list of tuples: (word, label)
print("Annotated results:")
for word, label in annotated_results[:10]:
    print(f"{word} {label}")
```

## Citation

```
@article{wu2024llmlingua2,
    title = "{LLML}ingua-2: Data Distillation for Efficient and Faithful Task-Agnostic Prompt Compression",
    author = "Zhuoshi Pan and Qianhui Wu and Huiqiang Jiang and Menglin Xia and Xufang Luo and Jue Zhang and Qingwei Lin and Victor Ruhle and Yuqing Yang and Chin-Yew Lin and H. Vicky Zhao and Lili Qiu and Dongmei Zhang",
    url = "https://arxiv.org/abs/2403.12968",
    journal = "ArXiv preprint",
    volume = "abs/2403.12968",
    year = "2024",
}
```
pierreguillou/ner-bert-base-cased-pt-lenerbr
pierreguillou
"2021-12-29T19:32:39Z"
100,771
11
transformers
[ "transformers", "pytorch", "bert", "token-classification", "generated_from_trainer", "pt", "dataset:lener_br", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:05Z"
---
language:
- pt
tags:
- generated_from_trainer
datasets:
- lener_br
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: checkpoints
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: lener_br
      type: lener_br
    metrics:
    - name: F1
      type: f1
      value: 0.8926146010186757
    - name: Precision
      type: precision
      value: 0.8810222036028488
    - name: Recall
      type: recall
      value: 0.9045161290322581
    - name: Accuracy
      type: accuracy
      value: 0.9759397808828684
    - name: Loss
      type: loss
      value: 0.18803243339061737
widget:
- text: "Ao Instituto Médico Legal da jurisdição do acidente ou da residência cumpre fornecer, no prazo de 90 dias, laudo à vítima (art. 5, § 5, Lei n. 6.194/74 de 19 de dezembro de 1974), função técnica que pode ser suprida por prova pericial realizada por ordem do juízo da causa, ou por prova técnica realizada no âmbito administrativo que se mostre coerente com os demais elementos de prova constante dos autos."
- text: "Acrescento que não há de se falar em violação do artigo 114, § 3º, da Constituição Federal, posto que referido dispositivo revela-se impertinente, tratando da possibilidade de ajuizamento de dissídio coletivo pelo Ministério Público do Trabalho nos casos de greve em atividade essencial."
- text: "Dispõe sobre o estágio de estudantes; altera a redação do art. 428 da Consolidação das Leis do Trabalho – CLT, aprovada pelo Decreto-Lei no 5.452, de 1o de maio de 1943, e a Lei no 9.394, de 20 de dezembro de 1996; revoga as Leis nos 6.494, de 7 de dezembro de 1977, e 8.859, de 23 de março de 1994, o parágrafo único do art. 82 da Lei no 9.394, de 20 de dezembro de 1996, e o art. 6o da Medida Provisória no 2.164-41, de 24 de agosto de 2001; e dá outras providências."
---

## (BERT base) NER model in the legal domain in Portuguese (LeNER-Br)

**ner-bert-base-portuguese-cased-lenerbr** is a NER model (token classification) in the legal domain in Portuguese that was finetuned on 20/12/2021 in Google Colab from the model [pierreguillou/bert-base-cased-pt-lenerbr](https://huggingface.co/pierreguillou/bert-base-cased-pt-lenerbr) on the dataset [LeNER_br](https://huggingface.co/datasets/lener_br) using a NER objective.

Due to the small size of BERTimbau base and of the finetuning dataset, the model overfitted before reaching the end of training. Here are the overall final metrics on the validation dataset (*note: see the paragraph "Validation metrics by Named Entity" for detailed metrics*):

- **f1**: 0.8926146010186757
- **precision**: 0.8810222036028488
- **recall**: 0.9045161290322581
- **accuracy**: 0.9759397808828684
- **loss**: 0.18803243339061737

Check as well the [large version of this model](https://huggingface.co/pierreguillou/ner-bert-large-cased-pt-lenerbr) with an f1 of 0.908.

**Note**: the model [pierreguillou/bert-base-cased-pt-lenerbr](https://huggingface.co/pierreguillou/bert-base-cased-pt-lenerbr) is a language model that was created through the finetuning of the model [BERTimbau base](https://huggingface.co/neuralmind/bert-base-portuguese-cased) on the dataset [LeNER-Br language modeling](https://huggingface.co/datasets/pierreguillou/lener_br_finetuning_language_model) using a MASK objective. This initial specialization of the language model before finetuning on the NER task improved the model quality a bit.

To prove it, here are the results of the NER model finetuned from the model [BERTimbau base](https://huggingface.co/neuralmind/bert-base-portuguese-cased) (a non-specialized language model):

- **f1**: 0.8716487228203504
- **precision**: 0.8559286898839138
- **recall**: 0.8879569892473118
- **accuracy**: 0.9755893153732458
- **loss**: 0.1133928969502449

## Blog post

[NLP | Modelos e Web App para Reconhecimento de Entidade Nomeada (NER) no domínio jurídico brasileiro](https://medium.com/@pierre_guillou/nlp-modelos-e-web-app-para-reconhecimento-de-entidade-nomeada-ner-no-dom%C3%ADnio-jur%C3%ADdico-b658db55edfb) (29/12/2021)

## Widget & App

You can test this model in the widget of this page. Use as well the [NER App](https://huggingface.co/spaces/pierreguillou/ner-bert-pt-lenerbr), which allows comparing the two BERT models (base and large) finetuned on the NER task with the legal LeNER-Br dataset.

## Using the model for inference in production

````
# install pytorch: check https://pytorch.org/
# !pip install transformers
from transformers import AutoModelForTokenClassification, AutoTokenizer
import torch

# parameters
model_name = "pierreguillou/ner-bert-base-cased-pt-lenerbr"
model = AutoModelForTokenClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

input_text = "Acrescento que não há de se falar em violação do artigo 114, § 3º, da Constituição Federal, posto que referido dispositivo revela-se impertinente, tratando da possibilidade de ajuizamento de dissídio coletivo pelo Ministério Público do Trabalho nos casos de greve em atividade essencial."

# tokenization
inputs = tokenizer(input_text, max_length=512, truncation=True, return_tensors="pt")
tokens = inputs.tokens()

# get predictions
outputs = model(**inputs).logits
predictions = torch.argmax(outputs, dim=2)

# print predictions
for token, prediction in zip(tokens, predictions[0].numpy()):
    print((token, model.config.id2label[prediction]))
````

You can use a pipeline, too. However, it seems to have an issue regarding the max_length of the input sequence.

````
!pip install transformers
from transformers import pipeline

model_name = "pierreguillou/ner-bert-base-cased-pt-lenerbr"

ner = pipeline(
    "ner",
    model=model_name
)

# input_text: the same example sentence as in the previous snippet
ner(input_text)
````

## Training procedure

### Notebook

The finetuning notebook ([HuggingFace_Notebook_token_classification_NER_LeNER_Br.ipynb](https://github.com/piegu/language-models/blob/master/HuggingFace_Notebook_token_classification_NER_LeNER_Br.ipynb)) is on GitHub.

### Hyperparameters

#### batch, learning rate...
- per_device_batch_size = 2
- gradient_accumulation_steps = 2
- learning_rate = 2e-5
- num_train_epochs = 10
- weight_decay = 0.01
- optimizer = AdamW
- betas = (0.9,0.999)
- epsilon = 1e-08
- lr_scheduler_type = linear
- seed = 7

#### save model & load best model
- save_total_limit = 2
- logging_steps = 300
- eval_steps = logging_steps
- evaluation_strategy = 'steps'
- logging_strategy = 'steps'
- save_strategy = 'steps'
- save_steps = logging_steps
- load_best_model_at_end = True
- fp16 = True

#### get best model through a metric
- metric_for_best_model = 'eval_f1'
- greater_is_better = True

### Training results

````
Num examples = 7828
Num Epochs = 10
Instantaneous batch size per device = 2
Total train batch size (w.
parallel, distributed & accumulation) = 4 Gradient Accumulation steps = 2 Total optimization steps = 19570 Step Training Loss Validation Loss Precision Recall F1 Accuracy 300 0.127600 0.178613 0.722909 0.741720 0.732194 0.948802 600 0.088200 0.136965 0.733636 0.867742 0.795074 0.963079 900 0.078000 0.128858 0.791912 0.838065 0.814335 0.965243 1200 0.077800 0.126345 0.815400 0.865376 0.839645 0.967849 1500 0.074100 0.148207 0.779274 0.895914 0.833533 0.960184 1800 0.059500 0.116634 0.830829 0.868172 0.849090 0.969342 2100 0.044500 0.208459 0.887150 0.816559 0.850392 0.960535 2400 0.029400 0.136352 0.867821 0.851398 0.859531 0.970271 2700 0.025000 0.165837 0.814881 0.878495 0.845493 0.961235 3000 0.038400 0.120629 0.811719 0.893763 0.850768 0.971506 3300 0.026200 0.175094 0.823435 0.882581 0.851983 0.962957 3600 0.025600 0.178438 0.881095 0.886022 0.883551 0.963689 3900 0.041000 0.134648 0.789035 0.916129 0.847846 0.967681 4200 0.026700 0.130178 0.821275 0.903226 0.860303 0.972313 4500 0.018500 0.139294 0.844016 0.875054 0.859255 0.971140 4800 0.020800 0.197811 0.892504 0.873118 0.882705 0.965883 5100 0.019300 0.161239 0.848746 0.888172 0.868012 0.967849 5400 0.024000 0.139131 0.837507 0.913333 0.873778 0.970591 5700 0.018400 0.157223 0.899754 0.864731 0.881895 0.970210 6000 0.023500 0.137022 0.883018 0.873333 0.878149 0.973243 6300 0.009300 0.181448 0.840490 0.900860 0.869628 0.968290 6600 0.019200 0.173125 0.821316 0.896559 0.857290 0.966736 6900 0.016100 0.143160 0.789938 0.904946 0.843540 0.968245 7200 0.017000 0.145755 0.823274 0.897634 0.858848 0.969037 7500 0.012100 0.159342 0.825694 0.883226 0.853491 0.967468 7800 0.013800 0.194886 0.861237 0.859570 0.860403 0.964771 8100 0.008000 0.140271 0.829914 0.896129 0.861752 0.971567 8400 0.010300 0.143318 0.826844 0.908817 0.865895 0.973578 8700 0.015000 0.143392 0.847336 0.889247 0.867786 0.973365 9000 0.006000 0.143512 0.847795 0.905591 0.875741 0.972892 9300 0.011800 0.138747 0.827133 0.894194 0.859357 0.971673 9600 0.008500 0.159490 0.837030 0.909032 0.871546 0.970028 9900 0.010700 0.159249 0.846692 0.910968 0.877655 0.970546 10200 0.008100 0.170069 0.848288 0.900645 0.873683 0.969113 10500 0.004800 0.183795 0.860317 0.899355 0.879403 0.969570 10800 0.010700 0.157024 0.837838 0.906667 0.870894 0.971094 11100 0.003800 0.164286 0.845312 0.880215 0.862410 0.970744 11400 0.009700 0.204025 0.884294 0.887527 0.885907 0.968854 11700 0.008900 0.162819 0.829415 0.887742 0.857588 0.970530 12000 0.006400 0.164296 0.852666 0.901075 0.876202 0.971414 12300 0.007100 0.143367 0.852959 0.895699 0.873807 0.973669 12600 0.015800 0.153383 0.859224 0.900430 0.879345 0.972679 12900 0.006600 0.173447 0.869954 0.899140 0.884306 0.970927 13200 0.006800 0.163234 0.856849 0.897204 0.876563 0.971795 13500 0.003200 0.167164 0.850867 0.907957 0.878485 0.971231 13800 0.003600 0.148950 0.867801 0.910538 0.888656 0.976961 14100 0.003500 0.155691 0.847621 0.907957 0.876752 0.974127 14400 0.003300 0.157672 0.846553 0.911183 0.877680 0.974584 14700 0.002500 0.169965 0.847804 0.917634 0.881338 0.973045 15000 0.003400 0.177099 0.842199 0.912473 0.875929 0.971155 15300 0.006000 0.164151 0.848928 0.911183 0.878954 0.973258 15600 0.002400 0.174305 0.847437 0.906667 0.876052 0.971765 15900 0.004100 0.174561 0.852929 0.907957 0.879583 0.972907 16200 0.002600 0.172626 0.843263 0.907097 0.874016 0.972100 16500 0.002100 0.185302 0.841108 0.907312 0.872957 0.970485 16800 0.002900 0.175638 0.840557 0.909247 0.873554 0.971704 17100 0.001600 0.178750 0.857056 0.906452 0.881062 0.971765 
17400 0.003900 0.188910 0.853619 0.907957 0.879950 0.970835 17700 0.002700 0.180822 0.864699 0.907097 0.885390 0.972283 18000 0.001300 0.179974 0.868150 0.906237 0.886785 0.973060 18300 0.000800 0.188032 0.881022 0.904516 0.892615 0.972572 18600 0.002700 0.183266 0.868601 0.901290 0.884644 0.972298 18900 0.001600 0.180301 0.862041 0.903011 0.882050 0.972344 19200 0.002300 0.183432 0.855370 0.904301 0.879155 0.971109 19500 0.001800 0.183381 0.854501 0.904301 0.878696 0.971186 ```` ### Validation metrics by Named Entity ```` Num examples = 1177 {'JURISPRUDENCIA': {'f1': 0.7016574585635359, 'number': 657, 'precision': 0.6422250316055625, 'recall': 0.7732115677321156}, 'LEGISLACAO': {'f1': 0.8839681133746677, 'number': 571, 'precision': 0.8942652329749103, 'recall': 0.8739054290718039}, 'LOCAL': {'f1': 0.8253968253968254, 'number': 194, 'precision': 0.7368421052631579, 'recall': 0.9381443298969072}, 'ORGANIZACAO': {'f1': 0.8934049079754601, 'number': 1340, 'precision': 0.918769716088328, 'recall': 0.8694029850746269}, 'PESSOA': {'f1': 0.982653539615565, 'number': 1072, 'precision': 0.9877474081055608, 'recall': 0.9776119402985075}, 'TEMPO': {'f1': 0.9657657657657657, 'number': 816, 'precision': 0.9469964664310954, 'recall': 0.9852941176470589}, 'overall_accuracy': 0.9725722644643211, 'overall_f1': 0.8926146010186757, 'overall_precision': 0.8810222036028488, 'overall_recall': 0.9045161290322581} ````
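The per-entity metrics above come from the finetuning notebook's evaluation; as a rough illustration only, metrics of this kind can be computed with the `seqeval` package on IOB2 label sequences. The toy sequences below are placeholders, not LeNER-Br data.

````
# Illustrative sketch (not the model's actual evaluation code).
# pip install seqeval
from seqeval.metrics import classification_report

# Toy gold and predicted label sequences in IOB2 format.
y_true = [["B-PESSOA", "I-PESSOA", "O", "B-LOCAL"]]
y_pred = [["B-PESSOA", "I-PESSOA", "O", "O"]]

print(classification_report(y_true, y_pred))
````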
SG161222/Realistic_Vision_V6.0_B1_noVAE
SG161222
"2024-04-12T15:39:17Z"
100,562
125
diffusers
[ "diffusers", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
null
"2023-11-29T08:02:09Z"
---
license: creativeml-openrail-m
---

<b>This model is available on <a href="https://www.mage.space/">Mage.Space</a> (main sponsor)</b><br>
<b>You can support me directly on Boosty - https://boosty.to/sg_161222</b><br>

<b>Please read this!</b><br>
This is not yet the full version of the model (read the <b>"Model Description"</b> section).<br>

For version 6.0, it is recommended to use the model with a VAE (to improve generation quality and get rid of artifacts): https://huggingface.co/stabilityai/sd-vae-ft-mse-original<br>

<b>Model Description</b><br>
Realistic Vision V6.0 "New Vision" is a global update of the Realistic Vision model, which will be released gradually in several beta versions until the full release. The model is aimed at realism and photorealism.<br>
CivitAI Page: https://civitai.com/models/4201/realistic-vision-v60-b1?modelVersionId=245598

<b>Resolutions (use a lower resolution if you get a lot of mutations and similar artifacts)</b><br>
- Face Portrait: 896x896<br>
- Portrait: 896x896, 768x1024<br>
- Half Body: 768x1024, 640x1152<br>
- Full Body: 896x896, 768x1024, 640x1152, 1024x768, 1152x640<br>

<b>Improvements</b>
- increased generation resolution to resolutions such as: 896x896, 768x1024, 640x1152, 1024x768, 1152x640 (note: in some cases there may still be mutations, duplications, etc. -> will be fixed in future versions).<br>
- improved SFW and NSFW generation of female anatomy (note: not all poses work correctly at such large resolutions -> will be fixed in future versions).<br>

<b>Recommended Workflow</b><br>
Images can be generated with or without Hires.Fix, but it will help improve the generation quality significantly. In some cases it is strictly recommended to use Hires.Fix, namely when generating full-body and half-body images (note: you can also use Restore Faces or ADetailer).<br>

<b>Recommended Generation Parameters</b><br>
Sampler: DPM++ SDE Karras (25+ steps) / DPM++ 2M SDE (50+ steps)<br>
Negative Prompt: (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck<br>

<b>Recommended Hires.Fix Parameters</b><br>
Sampler: DPM++ SDE Karras or DPM++ 2M SDE<br>
Denoising steps: 10+ (DPM++ SDE Karras) / 20+ (DPM++ 2M SDE) (note: the lower the hires-steps value for a given sampler, the stronger the skin texture and the higher the chance of getting artifacts)<br>
Denoising strength: 0.1-0.3<br>
Upscaler: 4x-UltraSharp / 4x_NMKD-Superscale-SP_178000_G or another<br>
Upscale by: 1.1-2.0+<br>
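The card does not include a loading snippet; below is a minimal, unofficial sketch of running the model with 🧨 diffusers together with an MSE-finetuned VAE. One assumption to note: the link above points to the original-checkpoint VAE repo, so the diffusers-format repo `stabilityai/sd-vae-ft-mse` is used here instead; the prompt, resolution and step count simply follow the recommendations above.

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline

# Assumption: diffusers-format twin of the recommended sd-vae-ft-mse-original checkpoint.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

pipe = StableDiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V6.0_B1_noVAE",
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="RAW photo, face portrait of a woman, natural lighting, high detail",
    negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, worst quality, low quality, jpeg artifacts",
    width=896,
    height=896,  # recommended face-portrait resolution from the card
    num_inference_steps=30,
).images[0]
image.save("portrait.png")
```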
lewtun/tiny-random-mt5
lewtun
"2022-09-15T15:04:49Z"
100,062
0
transformers
[ "transformers", "pytorch", "mt5", "feature-extraction", "endpoints_compatible", "text-generation-inference", "region:us" ]
feature-extraction
"2022-09-15T15:03:33Z"
Entry not found
alaggung/bart-r3f
alaggung
"2022-01-11T16:18:32Z"
99,165
6
transformers
[ "transformers", "pytorch", "tf", "bart", "text2text-generation", "summarization", "ko", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
summarization
"2022-03-02T23:29:05Z"
---
language:
- ko
tags:
- summarization
widget:
- text: "[BOS]밥 ㄱ?[SEP]고고고고 뭐 먹을까?[SEP]어제 김치찌개 먹어서 한식말고 딴 거[SEP]그럼 돈까스 어때?[SEP]오 좋다 1시 학관 앞으로 오셈[SEP]ㅇㅋ[EOS]"
inference:
  parameters:
    max_length: 64
    top_k: 5
---

# BART R3F

This is a sample dialogue summarization model shared by team 알라꿍달라꿍 from the dialogue summarization track of the [2021 Hunminjeongeum Korean Speech & Natural Language AI Competition].

It was trained on the dialogue summarization task by applying R3F from the [2021-dialogue-summary-competition](https://github.com/cosmoquester/2021-dialogue-summary-competition) repository to the [bart-pretrained](https://huggingface.co/alaggung/bart-pretrained) model. The [AIHub Korean dialogue summarization](https://aihub.or.kr/aidata/30714) dataset was used for training.
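For reference, a minimal, unofficial sketch of running the model with `transformers`, mirroring the widget's `[BOS]...[SEP]...[EOS]` input format and its `max_length`/`top_k` generation parameters:

```python
from transformers import AutoTokenizer, BartForConditionalGeneration

model_name = "alaggung/bart-r3f"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

# Dialogue turns are joined with [SEP] and wrapped in [BOS]/[EOS],
# following the widget example above.
turns = ["밥 ㄱ?", "고고고고 뭐 먹을까?", "어제 김치찌개 먹어서 한식말고 딴 거",
         "그럼 돈까스 어때?", "오 좋다 1시 학관 앞으로 오셈", "ㅇㅋ"]
text = "[BOS]" + "[SEP]".join(turns) + "[EOS]"

inputs = tokenizer(text, return_tensors="pt")
summary_ids = model.generate(inputs["input_ids"], max_length=64, do_sample=True, top_k=5)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```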
BAAI/bge-small-zh-v1.5
BAAI
"2023-10-12T03:35:59Z"
98,856
25
transformers
[ "transformers", "pytorch", "safetensors", "bert", "feature-extraction", "zh", "arxiv:2310.07554", "arxiv:2309.07597", "license:mit", "endpoints_compatible", "has_space", "region:us" ]
feature-extraction
"2023-09-12T05:22:29Z"
---
license: mit
language:
- zh
---

<h1 align="center">FlagEmbedding</h1>

<h4 align="center">
    <p>
        <a href=#model-list>Model List</a> |
        <a href=#frequently-asked-questions>FAQ</a> |
        <a href=#usage>Usage</a> |
        <a href="#evaluation">Evaluation</a> |
        <a href="#train">Train</a> |
        <a href="#contact">Contact</a> |
        <a href="#citation">Citation</a> |
        <a href="#license">License</a>
    <p>
</h4>

For more details, please refer to our GitHub: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding).

[English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md)

FlagEmbedding can map any text to a low-dimensional dense vector, which can be used for tasks like retrieval, classification, clustering, or semantic search. It can also be used in vector databases for LLMs.

************* 🌟**Updates**🌟 *************
- 10/12/2023: Release [LLM-Embedder](./FlagEmbedding/llm_embedder/README.md), a unified embedding model to support diverse retrieval augmentation needs for LLMs. [Paper](https://arxiv.org/pdf/2310.07554.pdf) :fire:
- 09/15/2023: The [technical report](https://arxiv.org/pdf/2309.07597.pdf) of BGE has been released
- 09/15/2023: The [massive training data](https://data.baai.ac.cn/details/BAAI-MTP) of BGE has been released
- 09/12/2023: New models:
    - **New reranker model**: release cross-encoder models `BAAI/bge-reranker-base` and `BAAI/bge-reranker-large`, which are more powerful than the embedding models. We recommend using/fine-tuning them to re-rank the top-k documents returned by embedding models.
    - **Updated embedding model**: release the `bge-*-v1.5` embedding models to alleviate the issue of the similarity distribution and enhance retrieval ability without instruction.

<details>
<summary>More</summary>
<!-- ### More -->
- 09/07/2023: Update [fine-tune code](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md): add a script to mine hard negatives and support adding an instruction during fine-tuning.
- 08/09/2023: BGE models are integrated into **Langchain**; you can use them like [this](#using-langchain). The C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard).
- 08/05/2023: Release base-scale and small-scale models, **best performance among the models of the same size 🤗**
- 08/02/2023: Release `bge-large-*` (short for BAAI General Embedding) models, **ranked 1st on the MTEB and C-MTEB benchmarks!** :tada: :tada:
- 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test datasets.
</details>

## Model List

`bge` is short for `BAAI general embedding`.
| Model | Language | Inference & Fine-tune | Description | query instruction for retrieval [1] |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|
| [BAAI/llm-embedder](https://huggingface.co/BAAI/llm-embedder) | English | [Inference](./FlagEmbedding/llm_embedder/README.md) [Fine-tune](./FlagEmbedding/llm_embedder/README.md) | a unified embedding model to support diverse retrieval augmentation needs for LLMs | See [README](./FlagEmbedding/llm_embedder/README.md) |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with a more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with a more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with a more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with a more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with a more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with a more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` |

[1\]: If you need to search for passages relevant to a query, we suggest adding the instruction to the query; in other cases, no instruction is needed: just use the original query directly. In all cases, **no instruction** needs to be added to passages.

[2\]: Different from the embedding model, the reranker uses a question and a document as input and directly outputs a similarity score instead of an embedding. To balance accuracy and time cost, the cross-encoder is widely used to re-rank the top-k documents retrieved by other simple models. For example, use the bge embedding model to retrieve the top 100 relevant documents, and then use the bge reranker to re-rank those 100 documents to get the final top-3 results.

All models have been uploaded to the Hugging Face Hub, and you can see them at https://huggingface.co/BAAI. If you cannot open the Hugging Face Hub, you can also download the models at https://model.baai.ac.cn/models .

## Frequently asked questions

<details>
<summary>1. How to fine-tune bge embedding model?</summary>

<!-- ### How to fine-tune bge embedding model? -->
Follow this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) to prepare data and fine-tune your model.
Some suggestions:
- Mine hard negatives following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune#hard-negatives), which can improve the retrieval performance.
- If you pre-train bge on your data, the pre-trained model cannot be used directly to calculate similarity; it must be fine-tuned with contrastive learning before computing similarity.
- If the accuracy of the fine-tuned model is still not high, it is recommended to use/fine-tune the cross-encoder model (bge-reranker) to re-rank the top-k results. Hard negatives are also needed to fine-tune the reranker.

</details>

<details>
<summary>2.
The similarity score between two dissimilar sentences is higher than 0.5</summary>

<!-- ### The similarity score between two dissimilar sentences is higher than 0.5 -->
**We suggest using bge v1.5, which alleviates the issue of the similarity distribution.**

Since we finetune the models by contrastive learning with a temperature of 0.01, the similarity distribution of the current BGE models lies roughly in the interval \[0.6, 1\]. So a similarity score greater than 0.5 does not indicate that the two sentences are similar.

For downstream tasks, such as passage retrieval or semantic similarity, **what matters is the relative order of the scores, not the absolute value.** If you need to filter similar sentences based on a similarity threshold, please select an appropriate threshold based on the similarity distribution on your data (such as 0.8, 0.85, or even 0.9).

</details>

<details>
<summary>3. When does the query instruction need to be used</summary>

<!-- ### When does the query instruction need to be used -->

For `bge-*-v1.5`, we improved its retrieval ability when not using an instruction. Using no instruction causes only a slight degradation in retrieval performance compared with using one. So, for convenience, you can generate embeddings without an instruction in all cases.

For a retrieval task that uses short queries to find long related documents, it is recommended to add instructions to these short queries. **The best method to decide whether to add instructions for queries is to choose the setting that achieves better performance on your task.** In all cases, no instruction needs to be added to the documents/passages.

</details>

## Usage

### Usage for Embedding Model

Here are some examples of using `bge` models with [FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers).

#### Using FlagEmbedding
```
pip install -U FlagEmbedding
```
If it doesn't work for you, see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more methods to install FlagEmbedding.

```python
from FlagEmbedding import FlagModel
sentences_1 = ["样例数据-1", "样例数据-2"]
sentences_2 = ["样例数据-3", "样例数据-4"]
model = FlagModel('BAAI/bge-large-zh-v1.5',
                  query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
                  use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
embeddings_1 = model.encode(sentences_1)
embeddings_2 = model.encode(sentences_2)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)

# for the s2p (short query to long passage) retrieval task, use encode_queries(), which automatically adds the instruction to each query
# the corpus in a retrieval task can still use encode() or encode_corpus(), since passages don't need the instruction
queries = ['query_1', 'query_2']
passages = ["样例文档-1", "样例文档-2"]
q_embeddings = model.encode_queries(queries)
p_embeddings = model.encode(passages)
scores = q_embeddings @ p_embeddings.T
```
For the value of the argument `query_instruction_for_retrieval`, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list).

By default, FlagModel will use all available GPUs when encoding. Please set `os.environ["CUDA_VISIBLE_DEVICES"]` to select specific GPUs.
You can also set `os.environ["CUDA_VISIBLE_DEVICES"]=""` to make all GPUs unavailable.

#### Using Sentence-Transformers

You can also use the `bge` models with [sentence-transformers](https://www.SBERT.net):

```
pip install -U sentence-transformers
```
```python
from sentence_transformers import SentenceTransformer
sentences_1 = ["样例数据-1", "样例数据-2"]
sentences_2 = ["样例数据-3", "样例数据-4"]
model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
embeddings_1 = model.encode(sentences_1, normalize_embeddings=True)
embeddings_2 = model.encode(sentences_2, normalize_embeddings=True)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)
```
For the s2p (short query to long passage) retrieval task, each short query should start with an instruction (for instructions, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). But the instruction is not needed for passages.
```python
from sentence_transformers import SentenceTransformer
queries = ['query_1', 'query_2']
passages = ["样例文档-1", "样例文档-2"]
instruction = "为这个句子生成表示以用于检索相关文章:"

model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
q_embeddings = model.encode([instruction+q for q in queries], normalize_embeddings=True)
p_embeddings = model.encode(passages, normalize_embeddings=True)
scores = q_embeddings @ p_embeddings.T
```

#### Using Langchain

You can use `bge` in Langchain like this:
```python
from langchain.embeddings import HuggingFaceBgeEmbeddings
model_name = "BAAI/bge-large-en-v1.5"
model_kwargs = {'device': 'cuda'}
encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity
model = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
    query_instruction="为这个句子生成表示以用于检索相关文章:"
)
model.query_instruction = "为这个句子生成表示以用于检索相关文章:"
```

#### Using HuggingFace Transformers

With the transformers package, you can use the model like this: first, pass your input through the transformer model, then select the last hidden state of the first token (i.e., [CLS]) as the sentence embedding.

```python
from transformers import AutoTokenizer, AutoModel
import torch
# Sentences we want sentence embeddings for
sentences = ["样例数据-1", "样例数据-2"]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5')
model = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5')
model.eval()

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# for the s2p (short query to long passage) retrieval task, add an instruction to each query (no instruction for passages)
# encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)
    # Perform pooling. In this case, cls pooling.
    sentence_embeddings = model_output[0][:, 0]
# normalize embeddings
sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
print("Sentence embeddings:", sentence_embeddings)
```

### Usage for Reranker

Different from the embedding model, the reranker uses a question and a document as input and directly outputs a similarity score instead of an embedding. You can get a relevance score by feeding a query and a passage to the reranker. The reranker is optimized based on cross-entropy loss, so the relevance score is not bounded to a specific range.
#### Using FlagEmbedding ``` pip install -U FlagEmbedding ``` Get relevance scores (higher scores indicate more relevance): ```python from FlagEmbedding import FlagReranker reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation score = reranker.compute_score(['query', 'passage']) print(score) scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]) print(scores) ``` #### Using Huggingface transformers ```python import torch from transformers import AutoModelForSequenceClassification, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large') model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-large') model.eval() pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']] with torch.no_grad(): inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512) scores = model(**inputs, return_dict=True).logits.view(-1, ).float() print(scores) ``` ## Evaluation `baai-general-embedding` models achieve **state-of-the-art performance on both MTEB and C-MTEB leaderboard!** For more details and evaluation tools see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md). - **MTEB**: | Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) |Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1024 | 512 | **64.23** | **54.29** | 46.08 | 87.12 | 60.03 | 83.11 | 31.61 | 75.97 | | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | 768 | 512 | 63.55 | 53.25 | 45.77 | 86.55 | 58.86 | 82.4 | 31.07 | 75.53 | | [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | 384 | 512 | 62.17 |51.68 | 43.82 | 84.92 | 58.36 | 81.59 | 30.12 | 74.14 | | [bge-large-en](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | 63.98 | 53.9 | 46.98 | 85.8 | 59.48 | 81.56 | 32.06 | 76.21 | | [bge-base-en](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 | | [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 | | [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 | | [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024| 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 | | [bge-small-en](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 | | [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 | | [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 | | [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 | | 
[text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 | | [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 | | [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 | | [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 | | [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 | - **C-MTEB**: We create the benchmark C-MTEB for Chinese text embedding which consists of 31 datasets from 6 tasks. Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction. | Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering | |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | [**BAAI/bge-large-zh-v1.5**](https://huggingface.co/BAAI/bge-large-zh-v1.5) | 1024 | **64.53** | 70.46 | 56.25 | 81.6 | 69.13 | 65.84 | 48.99 | | [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | 768 | 63.13 | 69.49 | 53.72 | 79.75 | 68.07 | 65.39 | 47.53 | | [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | 512 | 57.82 | 61.77 | 49.11 | 70.41 | 63.96 | 60.92 | 44.18 | | [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | 1024 | 64.20 | 71.53 | 54.98 | 78.94 | 68.32 | 65.11 | 48.39 | | [bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 53 | 76.77 | 68.58 | 64.91 | 50.01 | | [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 54.12 | 77.5 | 67.07 | 64.91 | 47.63 | | [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 1024 | 58.79 | 63.66 | 48.44 | 69.89 | 67.34 | 56.00 | 48.23 | | [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 49.45 | 70.35 | 63.64 | 61.48 | 45.09 | | [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 | 56.91 | 50.47 | 63.99 | 67.52 | 59.34 | 47.68 | | [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 | 54.75 | 50.42 | 64.3 | 68.2 | 59.66 | 48.88 | | [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 768 | 55.48 | 61.63 | 46.49 | 67.07 | 65.35 | 54.35 | 40.68 | | [multilingual-e5-small](https://huggingface.co/intfloat/multilingual-e5-small) | 384 | 55.38 | 59.95 | 45.27 | 66.45 | 65.85 | 53.86 | 45.26 | | [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 43.35 | 69.56 | 64.31 | 54.28 | 45.68 | | [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 42.78 | 66.62 | 61 | 49.25 | 44.39 | | [text2vec-base](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 43.41 | 67.41 | 62.19 | 49.45 | 37.66 | | [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 44.97 | 70.86 | 60.66 | 49.16 | 30.02 | - **Reranking**: See [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/) for 
evaluation script.

| Model | T2Reranking | T2RerankingZh2En\* | T2RerankingEn2Zh\* | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
| text2vec-base-multilingual | 64.66 | 62.94 | 62.51 | 14.37 | 48.46 | 48.6 | 50.26 |
| multilingual-e5-small | 65.62 | 60.94 | 56.41 | 29.91 | 67.26 | 66.54 | 57.78 |
| multilingual-e5-large | 64.55 | 61.61 | 54.28 | 28.6 | 67.42 | 67.92 | 57.4 |
| multilingual-e5-base | 64.21 | 62.13 | 54.68 | 29.5 | 66.23 | 66.98 | 57.29 |
| m3e-base | 66.03 | 62.74 | 56.07 | 17.51 | 77.05 | 76.76 | 59.36 |
| m3e-large | 66.13 | 62.72 | 56.1 | 16.46 | 77.76 | 78.27 | 59.57 |
| bge-base-zh-v1.5 | 66.49 | 63.25 | 57.02 | 29.74 | 80.47 | 84.88 | 63.64 |
| bge-large-zh-v1.5 | 65.74 | 63.39 | 57.03 | 28.74 | 83.45 | 85.44 | 63.97 |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 67.28 | 63.95 | 60.45 | 35.46 | 81.26 | 84.1 | 65.42 |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 67.6 | 64.03 | 61.44 | 37.16 | 82.15 | 84.18 | 66.09 |

\* : T2RerankingZh2En and T2RerankingEn2Zh are cross-language retrieval tasks

## Train

### BAAI Embedding

We pre-train the models using [retromae](https://github.com/staoxiao/RetroMAE) and train them on large-scale pair data using contrastive learning.
**You can fine-tune the embedding model on your data following our [examples](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune).**
We also provide a [pre-train example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/pretrain).
Note that the goal of pre-training is to reconstruct the text; the pre-trained model cannot be used for similarity calculation directly and needs to be fine-tuned.
For more training details for bge, see [baai_general_embedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md).

### BGE Reranker

The cross-encoder performs full attention over the input pair, which is more accurate than the embedding model (i.e., bi-encoder) but more time-consuming. Therefore, it can be used to re-rank the top-k documents returned by the embedding model.
We train the cross-encoder on multilingual pair data. The data format is the same as for the embedding model, so you can easily fine-tune it following our [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker).
For more details, please refer to [./FlagEmbedding/reranker/README.md](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker)

## Contact

If you have any questions or suggestions related to this project, feel free to open an issue or pull request.
You can also email Shitao Xiao (stxiao@baai.ac.cn) and Zheng Liu (liuzheng@baai.ac.cn).

## Citation

If you find this repository useful, please consider giving a star :star: and citation

```
@misc{bge_embedding,
      title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
      author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff},
      year={2023},
      eprint={2309.07597},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

## License

FlagEmbedding is licensed under the [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). The released models can be used for commercial purposes free of charge.
allenai/unifiedqa-v2-t5-large-1363200
allenai
"2023-01-24T16:28:30Z"
98,781
3
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "en", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text2text-generation
"2022-03-02T23:29:05Z"
--- language: en --- # Further details: https://github.com/allenai/unifiedqa
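The card is minimal, so here is a hedged usage sketch following the "question \n context" input convention documented in the UnifiedQA repository; the helper function mirrors that repository's README, and the example question is illustrative.

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_name = "allenai/unifiedqa-v2-t5-large-1363200"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

def run_model(input_string, **generator_args):
    # UnifiedQA expects the question and context joined by a literal "\n" sequence.
    input_ids = tokenizer.encode(input_string, return_tensors="pt")
    output = model.generate(input_ids, **generator_args)
    return tokenizer.batch_decode(output, skip_special_tokens=True)

print(run_model("which is best conductor? \\n (a) iron (b) feather"))
```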
jbochi/madlad400-3b-mt
jbochi
"2024-01-10T15:00:28Z"
98,418
104
transformers
[ "transformers", "safetensors", "gguf", "t5", "text2text-generation", "text-generation-inference", "translation", "multilingual", "en", "ru", "es", "fr", "de", "it", "pt", "pl", "nl", "vi", "tr", "sv", "id", "ro", "cs", "zh", "hu", "ja", "th", "fi", "fa", "uk", "da", "el", "no", "bg", "sk", "ko", "ar", "lt", "ca", "sl", "he", "et", "lv", "hi", "sq", "ms", "az", "sr", "ta", "hr", "kk", "is", "ml", "mr", "te", "af", "gl", "fil", "be", "mk", "eu", "bn", "ka", "mn", "bs", "uz", "ur", "sw", "yue", "ne", "kn", "kaa", "gu", "si", "cy", "eo", "la", "hy", "ky", "tg", "ga", "mt", "my", "km", "tt", "so", "ku", "ps", "pa", "rw", "lo", "ha", "dv", "fy", "lb", "ckb", "mg", "gd", "am", "ug", "ht", "grc", "hmn", "sd", "jv", "mi", "tk", "ceb", "yi", "ba", "fo", "or", "xh", "su", "kl", "ny", "sm", "sn", "co", "zu", "ig", "yo", "pap", "st", "haw", "as", "oc", "cv", "lus", "tet", "gsw", "sah", "br", "rm", "sa", "bo", "om", "se", "ce", "cnh", "ilo", "hil", "udm", "os", "lg", "ti", "vec", "ts", "tyv", "kbd", "ee", "iba", "av", "kha", "to", "tn", "nso", "fj", "zza", "ak", "ada", "otq", "dz", "bua", "cfm", "ln", "chm", "gn", "krc", "wa", "hif", "yua", "srn", "war", "rom", "bik", "pam", "sg", "lu", "ady", "kbp", "syr", "ltg", "myv", "iso", "kac", "bho", "ay", "kum", "qu", "za", "pag", "ngu", "ve", "pck", "zap", "tyz", "hui", "bbc", "tzo", "tiv", "ksd", "gom", "min", "ang", "nhe", "bgp", "nzi", "nnb", "nv", "zxx", "bci", "kv", "new", "mps", "alt", "meu", "bew", "fon", "iu", "abt", "mgh", "mnw", "tvl", "dov", "tlh", "ho", "kw", "mrj", "meo", "crh", "mbt", "emp", "ace", "ium", "mam", "gym", "mai", "crs", "pon", "ubu", "fip", "quc", "gv", "kj", "btx", "ape", "chk", "rcf", "shn", "tzh", "mdf", "ppk", "ss", "gag", "cab", "kri", "seh", "ibb", "tbz", "bru", "enq", "ach", "cuk", "kmb", "wo", "kek", "qub", "tab", "bts", "kos", "rwo", "cak", "tuc", "bum", "cjk", "gil", "stq", "tsg", "quh", "mak", "arn", "ban", "jiv", "sja", "yap", "tcy", "toj", "twu", "xal", "amu", "rmc", "hus", "nia", "kjh", "bm", "guh", "mas", "acf", "dtp", "ksw", "bzj", "din", "zne", "mad", "msi", "mag", "mkn", "kg", "lhu", "ch", "qvi", "mh", "djk", "sus", "mfe", "srm", "dyu", "ctu", "gui", "pau", "inb", "bi", "mni", "guc", "jam", "wal", "jac", "bas", "gor", "skr", "nyu", "noa", "sda", "gub", "nog", "cni", "teo", "tdx", "sxn", "rki", "nr", "frp", "alz", "taj", "lrc", "cce", "rn", "jvn", "hvn", "nij", "dwr", "izz", "msm", "bus", "ktu", "chr", "maz", "tzj", "suz", "knj", "bim", "gvl", "bqc", "tca", "pis", "prk", "laj", "mel", "qxr", "niq", "ahk", "shp", "hne", "spp", "koi", "krj", "quf", "luz", "agr", "tsc", "mqy", "gof", "gbm", "miq", "dje", "awa", "bjj", "qvz", "sjp", "tll", "raj", "kjg", "bgz", "quy", "cbk", "akb", "oj", "ify", "mey", "ks", "cac", "brx", "qup", "syl", "jax", "ff", "ber", "tks", "trp", "mrw", "adh", "smt", "srr", "ffm", "qvc", "mtr", "ann", "aa", "noe", "nut", "gyn", "kwi", "xmm", "msb", "dataset:allenai/MADLAD-400", "arxiv:2309.04662", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
translation
"2023-09-21T00:28:38Z"
--- license: apache-2.0 language: - multilingual - en - ru - es - fr - de - it - pt - pl - nl - vi - tr - sv - id - ro - cs - zh - hu - ja - th - fi - fa - uk - da - el - "no" - bg - sk - ko - ar - lt - ca - sl - he - et - lv - hi - sq - ms - az - sr - ta - hr - kk - is - ml - mr - te - af - gl - fil - be - mk - eu - bn - ka - mn - bs - uz - ur - sw - yue - ne - kn - kaa - gu - si - cy - eo - la - hy - ky - tg - ga - mt - my - km - tt - so - ku - ps - pa - rw - lo - ha - dv - fy - lb - ckb - mg - gd - am - ug - ht - grc - hmn - sd - jv - mi - tk - ceb - yi - ba - fo - or - xh - su - kl - ny - sm - sn - co - zu - ig - yo - pap - st - haw - as - oc - cv - lus - tet - gsw - sah - br - rm - sa - bo - om - se - ce - cnh - ilo - hil - udm - os - lg - ti - vec - ts - tyv - kbd - ee - iba - av - kha - to - tn - nso - fj - zza - ak - ada - otq - dz - bua - cfm - ln - chm - gn - krc - wa - hif - yua - srn - war - rom - bik - pam - sg - lu - ady - kbp - syr - ltg - myv - iso - kac - bho - ay - kum - qu - za - pag - ngu - ve - pck - zap - tyz - hui - bbc - tzo - tiv - ksd - gom - min - ang - nhe - bgp - nzi - nnb - nv - zxx - bci - kv - new - mps - alt - meu - bew - fon - iu - abt - mgh - mnw - tvl - dov - tlh - ho - kw - mrj - meo - crh - mbt - emp - ace - ium - mam - gym - mai - crs - pon - ubu - fip - quc - gv - kj - btx - ape - chk - rcf - shn - tzh - mdf - ppk - ss - gag - cab - kri - seh - ibb - tbz - bru - enq - ach - cuk - kmb - wo - kek - qub - tab - bts - kos - rwo - cak - tuc - bum - cjk - gil - stq - tsg - quh - mak - arn - ban - jiv - sja - yap - tcy - toj - twu - xal - amu - rmc - hus - nia - kjh - bm - guh - mas - acf - dtp - ksw - bzj - din - zne - mad - msi - mag - mkn - kg - lhu - ch - qvi - mh - djk - sus - mfe - srm - dyu - ctu - gui - pau - inb - bi - mni - guc - jam - wal - jac - bas - gor - skr - nyu - noa - sda - gub - nog - cni - teo - tdx - sxn - rki - nr - frp - alz - taj - lrc - cce - rn - jvn - hvn - nij - dwr - izz - msm - bus - ktu - chr - maz - tzj - suz - knj - bim - gvl - bqc - tca - pis - prk - laj - mel - qxr - niq - ahk - shp - hne - spp - koi - krj - quf - luz - agr - tsc - mqy - gof - gbm - miq - dje - awa - bjj - qvz - sjp - tll - raj - kjg - bgz - quy - cbk - akb - oj - ify - mey - ks - cac - brx - qup - syl - jax - ff - ber - tks - trp - mrw - adh - smt - srr - ffm - qvc - mtr - ann - kaa - aa - noe - nut - gyn - kwi - xmm - msb library_name: transformers tags: - text2text-generation - text-generation-inference datasets: - allenai/MADLAD-400 pipeline_tag: translation widget: - text: "<2en> Como vai, amigo?" example_title: "Translation to English" - text: "<2de> Do you speak German?" example_title: "Translation to German" --- # Model Card for MADLAD-400-3B-MT # Table of Contents 0. [TL;DR](#TL;DR) 1. [Model Details](#model-details) 2. [Usage](#usage) 3. [Uses](#uses) 4. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 5. [Training Details](#training-details) 6. [Evaluation](#evaluation) 7. [Environmental Impact](#environmental-impact) 8. [Citation](#citation) # TL;DR MADLAD-400-3B-MT is a multilingual machine translation model based on the T5 architecture that was trained on 1 trillion tokens covering over 450 languages using publicly available data. It is competitive with models that are significantly larger. **Disclaimer**: [Juarez Bochi](https://huggingface.co/jbochi), who was not involved in this research, converted the original weights and wrote the contents of this model card based on the original paper and Flan-T5. 
# Model Details

## Model Description

- **Model type:** Language model
- **Language(s) (NLP):** Multilingual (400+ languages)
- **License:** Apache 2.0
- **Related Models:** [All MADLAD-400 Checkpoints](https://huggingface.co/models?search=madlad)
- **Original Checkpoints:** [All Original MADLAD-400 Checkpoints](https://github.com/google-research/google-research/tree/master/madlad_400)
- **Resources for more information:**
  - [Research paper](https://arxiv.org/abs/2309.04662)
  - [GitHub Repo](https://github.com/google-research/t5x)
  - [Hugging Face MADLAD-400 Docs (Similar to T5)](https://huggingface.co/docs/transformers/model_doc/MADLAD-400)
  - [Pending PR](https://github.com/huggingface/transformers/pull/27471)

# Usage

Find below some example scripts on how to use the model:

## Using the Pytorch model with `transformers`

### Running the model on a CPU or GPU

<details>
<summary> Click to expand </summary>

First, install the Python packages that are required:

`pip install transformers accelerate sentencepiece protobuf`

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_name = 'jbochi/madlad400-3b-mt'
model = T5ForConditionalGeneration.from_pretrained(model_name, device_map="auto")
tokenizer = T5Tokenizer.from_pretrained(model_name)

text = "<2pt> I love pizza!"
input_ids = tokenizer(text, return_tensors="pt").input_ids.to(model.device)
outputs = model.generate(input_ids=input_ids)

tokenizer.decode(outputs[0], skip_special_tokens=True)
# Eu adoro pizza!
```

</details>

## Running the model with Candle

<details>
<summary> Click to expand </summary>

Usage with [candle](https://github.com/huggingface/candle):

```bash
$ cargo run --example t5 --release -- \
  --model-id "jbochi/madlad400-3b-mt" \
  --prompt "<2de> How are you, my friend?" \
  --decode --temperature 0
```

We also provide a quantized model (1.65 GB vs the original 11.8 GB file):

```
cargo run --example quantized-t5 --release -- \
  --model-id "jbochi/madlad400-3b-mt" --weight-file "model-q4k.gguf" \
  --prompt "<2de> How are you, my friend?" \
  --temperature 0
...
Wie geht es dir, mein Freund?
```

</details>

# Uses

## Direct Use and Downstream Use

> Primary intended uses: Machine Translation and multilingual NLP tasks on over 400 languages.
> Primary intended users: Research community.

## Out-of-Scope Use

> These models are trained on general domain data and are therefore not meant to
> work on domain-specific models out of the box. Moreover, these research models have not been assessed
> for production use cases.

# Bias, Risks, and Limitations

> We note that we evaluate on only 204 of the languages supported by these models and on machine translation
> and few-shot machine translation tasks. Users must consider use of this model carefully for their own
> use case.

## Ethical considerations and risks

> We trained these models with MADLAD-400 and publicly available data to create baseline models that
> support NLP for over 400 languages, with a focus on languages underrepresented in large-scale corpora.
> Given that these models were trained with web-crawled datasets that may contain sensitive, offensive or
> otherwise low-quality content despite extensive preprocessing, it is still possible that these issues in the
> underlying training data may cause differences in model performance and toxic (or otherwise problematic)
> output for certain domains. Moreover, large models are dual-use technologies that have specific risks
> associated with their use and development.
> We point the reader to surveys such as those written by
> Weidinger et al. or Bommasani et al. for a more detailed discussion of these risks, and to Liebling
> et al. for a thorough discussion of the risks of machine translation systems.

## Known Limitations

More information needed

## Sensitive Use:

More information needed

# Training Details

> We train models of various sizes: a 3B, 32-layer parameter model,
> a 7.2B 48-layer parameter model and a 10.7B 32-layer parameter model.
> We share all parameters of the model across language pairs,
> and use a SentencePiece model with 256k tokens shared on both the encoder and decoder
> side. Each input sentence has a `<2xx>` token prepended to the source sentence to indicate the target
> language.

See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details.

## Training Data

> For both the machine translation and language model, MADLAD-400 is used. For the machine translation
> model, a combination of parallel datasources covering 157 languages is also used. Further details are
> described in the [paper](https://arxiv.org/pdf/2309.04662.pdf).

## Training Procedure

See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details.

# Evaluation

## Testing Data, Factors & Metrics

> For evaluation, we used WMT, NTREX, Flores-200 and Gatones datasets as described in Section 4.3 in the [paper](https://arxiv.org/pdf/2309.04662.pdf).

> The translation quality of this model varies based on language, as seen in the paper, and likely varies on
> domain, though we have not assessed this.

## Results

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/EzsMD1AwCuFH0S0DeD-n8.png)

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/CJ5zCUVy7vTU76Lc8NZcK.png)

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/NK0S-yVeWuhKoidpLYh3m.png)

See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details.

# Environmental Impact

More information needed

# Citation

**BibTeX:**

```bibtex
@misc{kudugunta2023madlad400,
      title={MADLAD-400: A Multilingual And Document-Level Large Audited Dataset},
      author={Sneha Kudugunta and Isaac Caswell and Biao Zhang and Xavier Garcia and Christopher A. Choquette-Choo and Katherine Lee and Derrick Xin and Aditya Kusupati and Romi Stella and Ankur Bapna and Orhan Firat},
      year={2023},
      eprint={2309.04662},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
stabilityai/stable-diffusion-x4-upscaler
stabilityai
"2023-07-05T16:19:13Z"
98,351
601
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "arxiv:2112.10752", "arxiv:2202.00512", "arxiv:1910.09700", "license:openrail++", "has_space", "diffusers:StableDiffusionUpscalePipeline", "region:us" ]
null
"2022-11-23T17:42:04Z"
---
license: openrail++
tags:
- stable-diffusion
inference: false
---

# Stable Diffusion x4 upscaler model card

This model card focuses on the model associated with the Stable Diffusion Upscaler, available [here](https://github.com/Stability-AI/stablediffusion).
This model is trained for 1.25M steps on a 10M subset of LAION containing images `>2048x2048`. The model was trained on crops of size `512x512` and is a text-guided [latent upscaling diffusion model](https://arxiv.org/abs/2112.10752).
In addition to the textual input, it receives a `noise_level` as an input parameter, which can be used to add noise to the low-resolution input according to a [predefined diffusion schedule](configs/stable-diffusion/x4-upscaling.yaml).

![Image](https://github.com/Stability-AI/stablediffusion/raw/main/assets/stable-samples/upscaling/merged-dog.png)

- Use it with the [`stablediffusion`](https://github.com/Stability-AI/stablediffusion) repository: download the `x4-upscaler-ema.ckpt` [here](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/resolve/main/x4-upscaler-ema.ckpt).
- Use it with 🧨 [`diffusers`](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler#examples)

## Model Details

- **Developed by:** Robin Rombach, Patrick Esser
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL)
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([OpenCLIP-ViT/H](https://github.com/mlfoundations/open_clip)).
- **Resources for more information:** [GitHub Repository](https://github.com/Stability-AI/).
- **Cite as:**

      @InProceedings{Rombach_2022_CVPR,
          author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
          title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
          booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
          month     = {June},
          year      = {2022},
          pages     = {10684-10695}
      }

## Examples

Using 🤗's [Diffusers library](https://github.com/huggingface/diffusers) to run Stable Diffusion 2 in a simple and efficient manner.
```bash
pip install diffusers transformers accelerate scipy safetensors
```

```python
import requests
from PIL import Image
from io import BytesIO
from diffusers import StableDiffusionUpscalePipeline
import torch

# load model and scheduler
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")

# let's download an image
url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
response = requests.get(url)
low_res_img = Image.open(BytesIO(response.content)).convert("RGB")
low_res_img = low_res_img.resize((128, 128))
prompt = "a white cat"
upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0]
upscaled_image.save("upsampled_cat.png")
```

**Notes**:
- Despite not being a dependency, we highly recommend installing [xformers](https://github.com/facebookresearch/xformers) for memory-efficient attention (better performance)
- If you have low GPU RAM available, make sure to add `pipeline.enable_attention_slicing()` after sending the pipeline to `cuda`, for less VRAM usage (at the cost of speed)

# Uses

## Direct Use

The model is intended for research purposes only. Possible research areas and tasks include

- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.

Excluded uses are described below.

### Misuse, Malicious Use, and Out-of-Scope Use

_Note: This section is originally taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), was used for Stable Diffusion v1, but applies in the same way to Stable Diffusion v2_.

The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.

#### Out-of-Scope Use

The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.

#### Misuse and Malicious Use

Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:

- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- Impersonating individuals without their consent.
- Sexual content without consent of the people who might see it.
- Mis- and disinformation
- Representations of egregious violence and gore
- Sharing of copyrighted or licensed material in violation of its terms of use.
- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.

## Limitations and Bias

### Limitations

- The model does not achieve perfect photorealism
- The model cannot render legible text
- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
- Faces and people in general may not be generated properly.
- The model was trained mainly with English captions and will not work as well in other languages.
- The autoencoding part of the model is lossy
- The model was trained on a subset of the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent and sexual content. To partially mitigate this, we have filtered the dataset using LAION's NSFW detector (see Training section).

### Bias

While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
Stable Diffusion v2 was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are limited to English descriptions.
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
Stable Diffusion v2 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent.

## Training

**Training Data**

The model developers used the following dataset for training the model:

- LAION-5B and subsets (details below). The training data is further filtered using LAION's NSFW detector, with a "p_unsafe" score of 0.1 (conservative). For more details, please refer to LAION-5B's [NeurIPS 2022](https://openreview.net/forum?id=M3Y74vmsMcY) paper and reviewer discussions on the topic.

**Training Procedure**

Stable Diffusion v2 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,

- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4
- Text prompts are encoded through the OpenCLIP-ViT/H text-encoder.
- The output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention.
- The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. We also use the so-called _v-objective_, see https://arxiv.org/abs/2202.00512.

We currently provide the following checkpoints:

- `512-base-ema.ckpt`: 550k steps at resolution `256x256` on a subset of [LAION-5B](https://laion.ai/blog/laion-5b/) filtered for explicit pornographic material, using the [LAION-NSFW classifier](https://github.com/LAION-AI/CLIP-based-NSFW-Detector) with `punsafe=0.1` and an [aesthetic score](https://github.com/christophschuhmann/improved-aesthetic-predictor) >= `4.5`. 850k steps at resolution `512x512` on the same dataset with resolution `>= 512x512`.
- `768-v-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for 150k steps using a [v-objective](https://arxiv.org/abs/2202.00512) on the same dataset. Resumed for another 140k steps on a `768x768` subset of our dataset.
- `512-depth-ema.ckpt`: Resumed from `512-base-ema.ckpt` and finetuned for 200k steps. Added an extra input channel to process the (relative) depth prediction produced by [MiDaS](https://github.com/isl-org/MiDaS) (`dpt_hybrid`) which is used as an additional conditioning. The additional input channels of the U-Net which process this extra information were zero-initialized.
- `512-inpainting-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for another 200k steps. Follows the mask-generation strategy presented in [LAMA](https://github.com/saic-mdal/lama) which, in combination with the latent VAE representations of the masked image, are used as an additional conditioning. The additional input channels of the U-Net which process this extra information were zero-initialized. The same strategy was used to train the [1.5-inpainting checkpoint](https://github.com/saic-mdal/lama).
- `x4-upscaling-ema.ckpt`: Trained for 1.25M steps on a 10M subset of LAION containing images `>2048x2048`. The model was trained on crops of size `512x512` and is a text-guided [latent upscaling diffusion model](https://arxiv.org/abs/2112.10752). In addition to the textual input, it receives a `noise_level` as an input parameter, which can be used to add noise to the low-resolution input according to a [predefined diffusion schedule](configs/stable-diffusion/x4-upscaling.yaml).

- **Hardware:** 32 x 8 x A100 GPUs
- **Optimizer:** AdamW
- **Gradient Accumulations**: 1
- **Batch:** 32 x 8 x 2 x 4 = 2048
- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant

## Evaluation Results

Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) and 50 DDIM sampling steps show the relative improvements of the checkpoints:

![pareto](model-variants.jpg)

Evaluated using 50 DDIM steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.

## Environmental Impact

**Stable Diffusion v1** **Estimated Emissions**

Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.

- **Hardware Type:** A100 PCIe 40GB
- **Hours used:** 200000
- **Cloud Provider:** AWS
- **Compute Region:** US-east
- **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 15000 kg CO2 eq.

## Citation

    @InProceedings{Rombach_2022_CVPR,
        author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
        title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
        booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
        month     = {June},
        year      = {2022},
        pages     = {10684-10695}
    }

*This model card was written by: Robin Rombach, Patrick Esser and David Ha and is based on the [Stable Diffusion v1](https://github.com/CompVis/stable-diffusion/blob/main/Stable_Diffusion_v1_Model_Card.md) and [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
Davlan/distilbert-base-multilingual-cased-ner-hrl
Davlan
"2023-08-14T19:34:34Z"
98,022
79
transformers
[ "transformers", "pytorch", "tf", "safetensors", "distilbert", "token-classification", "license:afl-3.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
token-classification
"2022-03-02T23:29:04Z"
---
license: afl-3.0
language:
- ar
- de
- en
- es
- fr
- it
- lv
- nl
- pt
- zh
- multilingual
---

# distilbert-base-multilingual-cased-ner-hrl

## Model description

**distilbert-base-multilingual-cased-ner-hrl** is a **Named Entity Recognition** model for 10 high-resourced languages (Arabic, German, English, Spanish, French, Italian, Latvian, Dutch, Portuguese and Chinese) based on a fine-tuned DistilBERT base model. It has been trained to recognize three types of entities: location (LOC), organizations (ORG), and person (PER). Specifically, this model is a *distilbert-base-multilingual-cased* model that was fine-tuned on an aggregation of datasets from 10 high-resourced languages.

## Intended uses & limitations

#### How to use

You can use this model with the Transformers *pipeline* for NER.

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline

tokenizer = AutoTokenizer.from_pretrained("Davlan/distilbert-base-multilingual-cased-ner-hrl")
model = AutoModelForTokenClassification.from_pretrained("Davlan/distilbert-base-multilingual-cased-ner-hrl")
nlp = pipeline("ner", model=model, tokenizer=tokenizer)
example = "Nader Jokhadar had given Syria the lead with a well-struck header in the seventh minute."
ner_results = nlp(example)
print(ner_results)
```

#### Limitations and bias

This model is limited by its training dataset of entity-annotated news articles from a specific span of time. This may not generalize well for all use cases in different domains.

## Training data

The training data for the 10 languages comes from:

Language|Dataset
-|-
Arabic | [ANERcorp](https://camel.abudhabi.nyu.edu/anercorp/)
German | [conll 2003](https://www.clips.uantwerpen.be/conll2003/ner/)
English | [conll 2003](https://www.clips.uantwerpen.be/conll2003/ner/)
Spanish | [conll 2002](https://www.clips.uantwerpen.be/conll2002/ner/)
French | [Europeana Newspapers](https://github.com/EuropeanaNewspapers/ner-corpora/tree/master/enp_FR.bnf.bio)
Italian | [Italian I-CAB](https://ontotext.fbk.eu/icab.html)
Latvian | [Latvian NER](https://github.com/LUMII-AILab/FullStack/tree/master/NamedEntities)
Dutch | [conll 2002](https://www.clips.uantwerpen.be/conll2002/ner/)
Portuguese | [Paramopama + Second Harem](https://github.com/davidsbatista/NER-datasets/tree/master/Portuguese)
Chinese | [MSRA](https://huggingface.co/datasets/msra_ner)

The training dataset distinguishes between the beginning and continuation of an entity so that if there are back-to-back entities of the same type, the model can output where the second entity begins. As in the dataset, each token will be classified as one of the following classes:

Abbreviation|Description
-|-
O|Outside of a named entity
B-PER|Beginning of a person’s name right after another person’s name
I-PER|Person’s name
B-ORG|Beginning of an organisation right after another organisation
I-ORG|Organisation
B-LOC|Beginning of a location right after another location
I-LOC|Location

## Training procedure

This model was trained on an NVIDIA V100 GPU with the recommended hyperparameters from the HuggingFace code.
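As a usage note complementing the pipeline example above: because the label scheme distinguishes B- and I- tags, the token-level output can be merged into whole entity spans. Below is a minimal sketch (our addition, not part of the original card) using the pipeline's `aggregation_strategy` option available in recent `transformers` versions:

```python
from transformers import pipeline

# Group B-/I- sub-token predictions into whole entity spans
nlp = pipeline(
    "ner",
    model="Davlan/distilbert-base-multilingual-cased-ner-hrl",
    aggregation_strategy="simple",
)

example = "Nader Jokhadar had given Syria the lead with a well-struck header in the seventh minute."
print(nlp(example))
# Expected shape of the output (scores will vary):
# [{'entity_group': 'PER', 'word': 'Nader Jokhadar', ...},
#  {'entity_group': 'LOC', 'word': 'Syria', ...}]
```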
llava-hf/llava-v1.6-mistral-7b-hf
llava-hf
"2024-03-21T14:39:54Z"
98,020
85
transformers
[ "transformers", "safetensors", "llava_next", "pretraining", "vision", "image-text-to-text", "arxiv:2310.03744", "endpoints_compatible", "has_space", "region:us" ]
image-text-to-text
"2024-02-20T08:01:48Z"
---
tags:
- vision
- image-text-to-text
---

# LLaVa-Next, leveraging [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) as LLM

The LLaVA-NeXT model was proposed in [LLaVA-NeXT: Improved reasoning, OCR, and world knowledge](https://llava-vl.github.io/blog/2024-01-30-llava-next/) by Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, Yong Jae Lee. LLaVa-NeXT (also called LLaVa-1.6) improves upon [LLaVa-1.5](https://huggingface.co/transformers/main/model_doc/llava.html) by increasing the input image resolution and training on an improved visual instruction tuning dataset to improve OCR and common sense reasoning.

Disclaimer: The team releasing LLaVa-NeXT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

LLaVa combines a pre-trained large language model with a pre-trained vision encoder for multimodal chatbot use cases. LLaVA 1.6 improves on LLaVA 1.5 by:
- Using [Mistral-7B](https://mistral.ai/news/announcing-mistral-7b/) (for this checkpoint) and [Nous-Hermes-2-Yi-34B](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B), which have better commercial licenses and bilingual support
- A more diverse and higher-quality data mixture
- Dynamic high resolution

![image/png](https://cdn-uploads.huggingface.co/production/uploads/62441d1d9fdefb55a0b7d12c/FPshq08TKYD0e-qwPLDVO.png)

## Intended uses & limitations

You can use the raw model for tasks like image captioning, visual question answering, and multimodal chatbot use cases. See the [model hub](https://huggingface.co/models?search=llava-hf) to look for other versions on a task that interests you.

### How to use

Here's the prompt template for this model:

```
"[INST] <image>\nWhat is shown in this image? [/INST]"
```

You can load and use the model as follows:

```python
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
import torch
from PIL import Image
import requests

processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")

model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
model.to("cuda:0")

# prepare image and text prompt, using the appropriate prompt template
url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"

inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")

# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=100)

print(processor.decode(output[0], skip_special_tokens=True))
```

### Model optimization

#### 4-bit quantization through `bitsandbytes` library

First make sure to install `bitsandbytes` (`pip install bitsandbytes`) and to have access to a CUDA-compatible GPU device. Simply change the snippet above as follows:

```diff
model = LlavaNextForConditionalGeneration.from_pretrained(
    model_id, 
    torch_dtype=torch.float16, 
    low_cpu_mem_usage=True,
+   load_in_4bit=True
)
```

#### Use Flash-Attention 2 to further speed up generation

First make sure to install `flash-attn`. Refer to the [original repository of Flash Attention](https://github.com/Dao-AILab/flash-attention) regarding that package installation.
Simply change the snippet above as follows:

```diff
model = LlavaNextForConditionalGeneration.from_pretrained(
    model_id, 
    torch_dtype=torch.float16, 
    low_cpu_mem_usage=True,
+   use_flash_attention_2=True
).to(0)
```

### BibTeX entry and citation info

```bibtex
@misc{liu2023improved,
      title={Improved Baselines with Visual Instruction Tuning}, 
      author={Haotian Liu and Chunyuan Li and Yuheng Li and Yong Jae Lee},
      year={2023},
      eprint={2310.03744},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
google/tapas-large-finetuned-wtq
google
"2023-09-05T14:48:42Z"
97,702
89
transformers
[ "transformers", "pytorch", "tf", "safetensors", "tapas", "table-question-answering", "en", "dataset:wikitablequestions", "arxiv:2004.02349", "arxiv:2010.00571", "arxiv:1508.00305", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
table-question-answering
"2022-03-02T23:29:05Z"
--- language: en tags: - tapas - table-question-answering license: apache-2.0 datasets: - wikitablequestions --- # TAPAS large model fine-tuned on WikiTable Questions (WTQ) This model has 2 versions which can be used. The default version corresponds to the `tapas_wtq_wikisql_sqa_inter_masklm_large_reset` checkpoint of the [original Github repository](https://github.com/google-research/tapas). This model was pre-trained on MLM and an additional step which the authors call intermediate pre-training, and then fine-tuned in a chain on [SQA](https://www.microsoft.com/en-us/download/details.aspx?id=54253), [WikiSQL](https://github.com/salesforce/WikiSQL) and finally [WTQ](https://github.com/ppasupat/WikiTableQuestions). It uses relative position embeddings (i.e. resetting the position index at every cell of the table). The other (non-default) version which can be used is: - `no_reset`, which corresponds to `tapas_wtq_wikisql_sqa_inter_masklm_large` (intermediate pre-training, absolute position embeddings). Disclaimer: The team releasing TAPAS did not write a model card for this model so this model card has been written by the Hugging Face team and contributors. ## Results Size | Reset | Dev Accuracy | Link -------- | --------| -------- | ---- **LARGE** | **noreset** | **0.5062** | [tapas-large-finetuned-wtq (with absolute pos embeddings)](https://huggingface.co/google/tapas-large-finetuned-wtq/tree/no_reset) **LARGE** | **reset** | **0.5097** | [tapas-large-finetuned-wtq](https://huggingface.co/google/tapas-large-finetuned-wtq/tree/main) BASE | noreset | 0.4525 | [tapas-base-finetuned-wtq (with absolute pos embeddings)](https://huggingface.co/google/tapas-base-finetuned-wtq/tree/no_reset) BASE | reset | 0.4638 | [tapas-base-finetuned-wtq](https://huggingface.co/google/tapas-base-finetuned-wtq/tree/main) MEDIUM | noreset | 0.4324 | [tapas-medium-finetuned-wtq (with absolute pos embeddings)](https://huggingface.co/google/tapas-medium-finetuned-wtq/tree/no_reset) MEDIUM | reset | 0.4324 | [tapas-medium-finetuned-wtq](https://huggingface.co/google/tapas-medium-finetuned-wtq/tree/main) SMALL | noreset | 0.3681 | [tapas-small-finetuned-wtq (with absolute pos embeddings)](https://huggingface.co/google/tapas-small-finetuned-wtq/tree/no_reset) SMALL | reset | 0.3762 | [tapas-small-finetuned-wtq](https://huggingface.co/google/tapas-small-finetuned-wtq/tree/main) MINI | noreset | 0.2783 | [tapas-mini-finetuned-wtq (with absolute pos embeddings)](https://huggingface.co/google/tapas-mini-finetuned-wtq/tree/no_reset) MINI | reset | 0.2854 | [tapas-mini-finetuned-wtq](https://huggingface.co/google/tapas-mini-finetuned-wtq/tree/main) TINY | noreset | 0.0823 | [tapas-tiny-finetuned-wtq (with absolute pos embeddings)](https://huggingface.co/google/tapas-tiny-finetuned-wtq/tree/no_reset) TINY | reset | 0.1039 | [tapas-tiny-finetuned-wtq](https://huggingface.co/google/tapas-tiny-finetuned-wtq/tree/main) ## Model description TAPAS is a BERT-like transformers model pretrained on a large corpus of English data from Wikipedia in a self-supervised fashion. This means it was pretrained on the raw tables and associated texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. 
More precisely, it was pretrained with two objectives:

- Masked language modeling (MLM): taking a (flattened) table and associated context, the model randomly masks 15% of the words in the input, then runs the entire (partially masked) sequence through the model. The model then has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of a table and associated text.
- Intermediate pre-training: to encourage numerical reasoning on tables, the authors additionally pre-trained the model by creating a balanced dataset of millions of syntactically created training examples. Here, the model must predict (classify) whether a sentence is supported or refuted by the contents of a table. The training examples are created based on synthetic as well as counterfactual statements.

This way, the model learns an inner representation of the English language used in tables and associated texts, which can then be used to extract features useful for downstream tasks such as answering questions about a table, or determining whether a sentence is entailed or refuted by the contents of a table. Fine-tuning is done by adding a cell selection head and aggregation head on top of the pre-trained model, and then jointly training these randomly initialized classification heads with the base model on SQA, WikiSQL and finally WTQ.

## Intended uses & limitations

You can use this model for answering questions related to a table. For code examples, we refer to the documentation of TAPAS on the HuggingFace website; a minimal usage sketch is also given below.

## Training procedure

### Preprocessing

The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form:

```
[CLS] Question [SEP] Flattened table [SEP]
```

The authors first converted the WTQ dataset into the format of SQA using automatic conversion scripts.

### Fine-tuning

The model was fine-tuned on 32 Cloud TPU v3 cores for 50,000 steps with maximum sequence length 512 and batch size of 512. In this setup, fine-tuning takes around 10 hours. The optimizer used is Adam with a learning rate of 1.93581e-5, and a warmup ratio of 0.128960. An inductive bias is added such that the model only selects cells of the same column. This is reflected by the `select_one_column` parameter of `TapasConfig`. See the [paper](https://arxiv.org/abs/2004.02349) for more details (tables 11 and 12).
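As referenced in the intended-uses section above, here is a minimal usage sketch. This is our addition based on the generic `transformers` table-question-answering pipeline rather than the original card, and the table values are made up for illustration:

```python
from transformers import pipeline

# TAPAS expects every cell as a string, including numbers
tqa = pipeline("table-question-answering", model="google/tapas-large-finetuned-wtq")

table = {
    "Repository": ["Transformers", "Datasets", "Tokenizers"],
    "Stars": ["36542", "4512", "3934"],
}
result = tqa(table=table, query="How many stars does the transformers repository have?")
print(result)
# Expected shape: {'answer': ..., 'coordinates': [...], 'cells': [...], 'aggregator': ...}
```

Note that the pipeline also needs `pandas` installed, since tables are converted to DataFrames internally.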
### BibTeX entry and citation info ```bibtex @misc{herzig2020tapas, title={TAPAS: Weakly Supervised Table Parsing via Pre-training}, author={Jonathan Herzig and Paweł Krzysztof Nowak and Thomas Müller and Francesco Piccinno and Julian Martin Eisenschlos}, year={2020}, eprint={2004.02349}, archivePrefix={arXiv}, primaryClass={cs.IR} } ``` ```bibtex @misc{eisenschlos2020understanding, title={Understanding tables with intermediate pre-training}, author={Julian Martin Eisenschlos and Syrine Krichene and Thomas Müller}, year={2020}, eprint={2010.00571}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ```bibtex @article{DBLP:journals/corr/PasupatL15, author = {Panupong Pasupat and Percy Liang}, title = {Compositional Semantic Parsing on Semi-Structured Tables}, journal = {CoRR}, volume = {abs/1508.00305}, year = {2015}, url = {http://arxiv.org/abs/1508.00305}, archivePrefix = {arXiv}, eprint = {1508.00305}, timestamp = {Mon, 13 Aug 2018 16:47:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/PasupatL15.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
dangvantuan/sentence-camembert-large
dangvantuan
"2023-09-12T11:38:28Z"
97,437
52
transformers
[ "transformers", "pytorch", "tf", "safetensors", "camembert", "feature-extraction", "Text", "Sentence Similarity", "Sentence-Embedding", "camembert-large", "sentence-similarity", "fr", "dataset:stsb_multi_mt", "arxiv:1908.10084", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
---
pipeline_tag: sentence-similarity
language: fr
datasets:
- stsb_multi_mt
tags:
- Text
- Sentence Similarity
- Sentence-Embedding
- camembert-large
license: apache-2.0
model-index:
- name: sentence-camembert-large by Van Tuan DANG
  results:
  - task:
      name: Sentence-Embedding
      type: Text Similarity
    dataset:
      name: Text Similarity fr
      type: stsb_multi_mt
      args: fr
    metrics:
    - name: Test Pearson correlation coefficient
      type: Pearson_correlation_coefficient
      value: xx.xx
---

## Description:

[**Sentence-CamemBERT-Large**](https://huggingface.co/dangvantuan/sentence-camembert-large) is the embedding model for French developed by [La Javaness](https://www.lajavaness.com/). The purpose of this embedding model is to represent the content and semantics of a French sentence in a mathematical vector, which allows it to understand the meaning of the text beyond individual words in queries and documents, offering a powerful semantic search.

Pre-trained sentence embedding models are the state of the art for sentence embeddings in French. The model is fine-tuned from the pre-trained [facebook/camembert-large](https://huggingface.co/camembert/camembert-large) using [Siamese BERT-Networks with 'sentences-transformers'](https://www.sbert.net/) on the [stsb](https://huggingface.co/datasets/stsb_multi_mt/viewer/fr/train) dataset.

## Usage

The model can be used directly (without a language model) as follows:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("dangvantuan/sentence-camembert-large")

sentences = ["Un avion est en train de décoller.",
             "Un homme joue d'une grande flûte.",
             "Un homme étale du fromage râpé sur une pizza.",
             "Une personne jette un chat au plafond.",
             "Une personne est en train de plier un morceau de papier.",
            ]

embeddings = model.encode(sentences)
```

## Evaluation

The model can be evaluated as follows on the French test data of stsb.

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers import InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from datasets import load_dataset

model = SentenceTransformer("dangvantuan/sentence-camembert-large")

def convert_dataset(dataset):
    dataset_samples = []
    for df in dataset:
        score = float(df['similarity_score']) / 5.0  # Normalize score to range 0...1
        inp_example = InputExample(texts=[df['sentence1'], df['sentence2']], label=score)
        dataset_samples.append(inp_example)
    return dataset_samples

# Loading the dataset for evaluation
df_dev = load_dataset("stsb_multi_mt", name="fr", split="dev")
df_test = load_dataset("stsb_multi_mt", name="fr", split="test")

# Convert the dataset for evaluation
# For the dev set:
dev_samples = convert_dataset(df_dev)
val_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
val_evaluator(model, output_path="./")

# For the test set:
test_samples = convert_dataset(df_test)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
test_evaluator(model, output_path="./")
```

**Test Result**: The performance is measured using Pearson and Spearman correlation:

- On dev

| Model | Pearson correlation | Spearman correlation | #params |
| ------------- | ------------- | ------------- | ------------- |
| [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large) | 88.2 | 88.02 | 336M |
| [dangvantuan/sentence-camembert-base](https://huggingface.co/dangvantuan/sentence-camembert-base) | 86.73 | 86.54 | 110M |
| [distiluse-base-multilingual-cased](https://huggingface.co/sentence-transformers/distiluse-base-multilingual-cased) | 79.22 | 79.16 | 135M |
| [GPT-3 (text-davinci-003)](https://platform.openai.com/docs/models) | 85 | NaN | 175B |
| [GPT (text-embedding-ada-002)](https://platform.openai.com/docs/models) | 79.75 | 80.44 | NaN |

- On test

| Model | Pearson correlation | Spearman correlation | #params |
| ------------- | ------------- | ------------- | ------------- |
| [dangvantuan/sentence-camembert-large](https://huggingface.co/dangvantuan/sentence-camembert-large) | 85.9 | 85.8 | 336M |
| [dangvantuan/sentence-camembert-base](https://huggingface.co/dangvantuan/sentence-camembert-base) | 82.36 | 81.64 | 110M |
| [distiluse-base-multilingual-cased](https://huggingface.co/sentence-transformers/distiluse-base-multilingual-cased) | 78.62 | 77.48 | 135M |
| [GPT-3 (text-davinci-003)](https://platform.openai.com/docs/models) | 82 | NaN | 175B |
| [GPT (text-embedding-ada-002)](https://platform.openai.com/docs/models) | 79.05 | 77.56 | NaN |

## Citation

    @article{reimers2019sentence,
        title={Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks},
        author={Nils Reimers and Iryna Gurevych},
        journal={https://arxiv.org/abs/1908.10084},
        year={2019}
    }

    @article{martin2020camembert,
        title={CamemBERT: a Tasty French Language Model},
        author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
        journal={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
        year={2020}
    }
Qdrant/bge-base-en-v1.5-onnx-Q
Qdrant
"2024-01-16T08:00:14Z"
97,095
0
transformers
[ "transformers", "onnx", "bert", "feature-extraction", "endpoints_compatible", "region:us" ]
feature-extraction
"2024-01-16T07:59:44Z"
Entry not found
BubbleSheep/Hgn_trans_en2zh
BubbleSheep
"2022-08-22T10:14:19Z"
97,035
3
transformers
[ "transformers", "pytorch", "marian", "text2text-generation", "translation", "en", "zh", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
translation
"2022-07-28T14:03:50Z"
---
language:
- en
- zh
tags:
- translation
license: apache-2.0
datasets:
- THUOCL清华大学开放中文词库
metrics:
- bleu
---

# Model Details

- **Model Description:** This model has been pre-trained for English-Chinese translation and fine-tuned on the THUOCL dataset.
- **Source group**: English
- **Target group**: Chinese
- **Parent Model:** Helsinki-NLP/opus-mt-en-zh, see https://huggingface.co/Helsinki-NLP/opus-mt-en-zh
- **Model Type:** Translation

#### Training Data

- 清华大学中文开放词库 (THUOCL, the Tsinghua Open Chinese Lexicon)
- **Data link**: http://thuocl.thunlp.org/

## How to Get Started With the Model

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("BubbleSheep/Hgn_trans_en2zh")

model = AutoModelForSeq2SeqLM.from_pretrained("BubbleSheep/Hgn_trans_en2zh")
```
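The snippet above only loads the model and tokenizer. A minimal translation sketch follows (our addition; the input sentence and generation settings are illustrative assumptions):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("BubbleSheep/Hgn_trans_en2zh")
model = AutoModelForSeq2SeqLM.from_pretrained("BubbleSheep/Hgn_trans_en2zh")

# Translate an English sentence to Chinese
inputs = tokenizer("I like machine translation.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```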
microsoft/phi-1_5
microsoft
"2024-02-06T12:36:39Z"
97,030
1,258
transformers
[ "transformers", "pytorch", "phi", "text-generation", "nlp", "code", "custom_code", "en", "arxiv:2309.05463", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-09-10T04:03:46Z"
--- license: mit license_link: https://huggingface.co/microsoft/phi-1_5/resolve/main/LICENSE language: - en pipeline_tag: text-generation tags: - nlp - code --- ## Model Summary The language model Phi-1.5 is a Transformer with **1.3 billion** parameters. It was trained using the same data sources as [phi-1](https://huggingface.co/microsoft/phi-1), augmented with a new data source that consists of various NLP synthetic texts. When assessed against benchmarks testing common sense, language understanding, and logical reasoning, Phi-1.5 demonstrates a nearly state-of-the-art performance among models with less than 10 billion parameters. We **did not** fine-tune Phi-1.5 either for **instruction following or through reinforcement learning from human feedback**. The intention behind crafting this open-source model is to provide the research community with a non-restricted small model to explore vital safety challenges, such as reducing toxicity, understanding societal biases, enhancing controllability, and more. For a safer model release, we exclude generic web-crawl data sources such as common-crawl from the training. This strategy prevents direct exposure to potentially harmful online content, enhancing the model's safety without RLHF. However, the model is still vulnerable to generating harmful content. We hope the model can help the research community to further study the safety of language models. Phi-1.5 can write poems, draft emails, create stories, summarize texts, write Python code (such as downloading a Hugging Face transformer model), etc. ## How to Use Phi-1.5 has been integrated in the `transformers` version 4.37.0. If you are using a lower version, ensure that you are doing the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. The current `transformers` version can be verified with: `pip list | grep transformers`. ## Intended Uses Given the nature of the training data, Phi-1.5 is best suited for prompts using the QA format, the chat format, and the code format. Note that Phi-1.5, being a base model, often produces irrelevant text following the main answer. In the following example, we've truncated the answer for illustrative purposes only. ### QA Format: ```markdown Write a detailed analogy between mathematics and a lighthouse. Answer: Mathematics is like a lighthouse, guiding us through the vast ocean of numbers and calculations. Just as a lighthouse illuminates the darkness, mathematics provides us with a clear path to navigate through complex problems. It helps us make sense of the world around us, just like a lighthouse helps ships find their way home. ``` where the model generates the text after "Answer:". ### Chat Format: ```markdown Alice: I don't know why, I'm struggling to maintain focus while studying. Any suggestions? Bob: Have you tried using a timer? It can help you stay on track and avoid distractions. Alice: That's a good idea. I'll give it a try. Charlie: Another thing that can help is to break up your study sessions into smaller chunks. It's easier to concentrate on one thing at a time. Alice: That makes sense. I'll try that too. Bob: And don't forget to take breaks! It's important to give your brain a rest so you can come back to your studies with a fresh perspective. Alice: Thanks for the advice, guys. I feel more motivated now. Charlie: No problem, Alice. We're all in this together. Bob: Yeah, and remember that it's okay to ask for help if you need it. 
We're here to support each other. ``` where the model generates the text after the first "Bob:". ### Code Format: ```python def print_prime(n): """ Print all primes between 1 and n """ primes = [] for num in range(2, n+1): is_prime = True for i in range(2, int(math.sqrt(num))+1): if num % i == 0: is_prime = False break if is_prime: primes.append(num) print(primes) ``` where the model generates the text after the comments. **Notes:** * Phi-1.5-generated text/code should be treated as a starting point rather than a definitive solution for potential use cases. Users should be cautious when employing these models in their applications. * Phi-1.5 has not been tested to ensure that it performs adequately for any production-level application. Please refer to the limitation sections of this document for more details. * If you are using `transformers<4.37.0`, always load the model with `trust_remote_code=True` to prevent side-effects. ## Sample Code ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer torch.set_default_device("cuda") model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5", torch_dtype="auto", trust_remote_code=True) tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5", trust_remote_code=True) inputs = tokenizer('''def print_prime(n): """ Print all primes between 1 and n """''', return_tensors="pt", return_attention_mask=False) outputs = model.generate(**inputs, max_length=200) text = tokenizer.batch_decode(outputs)[0] print(text) ``` ## Limitations of Phi-1.5 * Generate Inaccurate Code and Facts: The model often produces incorrect code snippets and statements. Users should treat these outputs as suggestions or starting points, not as definitive or accurate solutions. * Limited Scope for code: If the model generates Python scripts that utilize uncommon packages or scripts in other languages, we strongly recommend users manually verify all API uses. * Unreliable Responses to Instruction: The model has not undergone instruction fine-tuning. As a result, it may struggle or fail to adhere to intricate or nuanced instructions provided by users. * Language Limitations: The model is primarily designed to understand standard English. Informal English, slang, or any other language outside of English might pose challenges to its comprehension, leading to potential misinterpretations or errors in response. * Potential Societal Biases: Regardless of the safe data used for its training, the model is not entirely free from societal biases. There's a possibility it may generate content that mirrors these societal biases, particularly if prompted or instructed to do so. We urge users to be aware of this and to exercise caution and critical thinking when interpreting model outputs. * Toxicity: Despite that the model is trained with carefully selected data, the model can still produce harmful content if explicitly prompted or instructed to do so. We chose to release the model for research purposes only -- We hope to help the open-source community develop the most effective ways to reduce the toxicity of a model directly after pretraining. 
## Training ### Model * Architecture: a Transformer-based model with next-word prediction objective * Dataset size: 30B tokens * Training tokens: 150B tokens * Precision: fp16 * GPUs: 32xA100-40G * Training time: 8 days ### Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ### License The model is licensed under the [MIT license](https://huggingface.co/microsoft/phi-1_5/resolve/main/LICENSE). ### Citation You can find the paper at https://arxiv.org/abs/2309.05463. Please cite as: ```bib @article{textbooks2, title={Textbooks Are All You Need II: \textbf{phi-1.5} technical report}, author={Li, Yuanzhi and Bubeck, S{\'e}bastien and Eldan, Ronen and Del Giorno, Allie and Gunasekar, Suriya and Lee, Yin Tat}, journal={arXiv preprint arXiv:2309.05463}, year={2023} } ``` ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
Helsinki-NLP/opus-mt-en-fr
Helsinki-NLP
"2024-02-14T17:18:11Z"
96,672
31
transformers
[ "transformers", "pytorch", "tf", "jax", "marian", "text2text-generation", "translation", "en", "fr", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
translation
"2022-03-02T23:29:04Z"
--- pipeline_tag: translation license: apache-2.0 --- ### opus-mt-en-fr * source languages: en * target languages: fr * OPUS readme: [en-fr](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-fr/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-02-26.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-fr/opus-2020-02-26.zip) * test set translations: [opus-2020-02-26.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-fr/opus-2020-02-26.test.txt) * test set scores: [opus-2020-02-26.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-fr/opus-2020-02-26.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | newsdiscussdev2015-enfr.en.fr | 33.8 | 0.602 | | newsdiscusstest2015-enfr.en.fr | 40.0 | 0.643 | | newssyscomb2009.en.fr | 29.8 | 0.584 | | news-test2008.en.fr | 27.5 | 0.554 | | newstest2009.en.fr | 29.4 | 0.577 | | newstest2010.en.fr | 32.7 | 0.596 | | newstest2011.en.fr | 34.3 | 0.611 | | newstest2012.en.fr | 31.8 | 0.592 | | newstest2013.en.fr | 33.2 | 0.589 | | Tatoeba.en.fr | 50.5 | 0.672 |
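## Usage example

A minimal sketch (our addition, not part of the original card) using the `transformers` translation pipeline:

```python
from transformers import pipeline

# Marian-based OPUS-MT models work directly with the translation pipeline
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
print(translator("The weather is nice today.", max_length=128))
# Expected shape: [{'translation_text': '...'}]
```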
biu-nlp/f-coref
biu-nlp
"2022-11-28T11:35:52Z"
96,606
11
transformers
[ "transformers", "pytorch", "roberta", "fast", "coreference-resolution", "en", "dataset:multi_news", "dataset:ontonotes", "arxiv:2209.04280", "arxiv:2205.12644", "arxiv:1907.10529", "arxiv:2101.00434", "arxiv:2109.04127", "license:mit", "model-index", "endpoints_compatible", "region:us" ]
null
"2022-08-19T12:01:10Z"
---
language:
- en
tags:
- fast
- coreference-resolution
license: mit
datasets:
- multi_news
- ontonotes
metrics:
- CoNLL
task_categories:
- coreference-resolution
model-index:
- name: biu-nlp/f-coref
  results:
  - task:
      type: coreference-resolution
      name: coreference-resolution
    dataset:
      name: ontonotes
      type: coreference
    metrics:
    - name: Avg. F1
      type: CoNLL
      value: 78.5
---

## F-Coref: Fast, Accurate and Easy to Use Coreference Resolution

[F-Coref](https://arxiv.org/abs/2209.04280) makes it possible to process 2.8K OntoNotes documents in 25 seconds on a V100 GPU (compared to 6 minutes for the [LingMess](https://arxiv.org/abs/2205.12644) model, and to 12 minutes for the popular AllenNLP coreference model) with only a modest drop in accuracy. The fast speed is achieved through a combination of distillation of a compact model from the LingMess model, and an efficient batching implementation using a technique we call leftover batching.

Please check the [official repository](https://github.com/shon-otmazgin/fastcoref) for more details and updates.

#### Experiments

| Model | Runtime | Memory |
|-----------------------|---------|---------|
| [Joshi et al. (2020)](https://arxiv.org/abs/1907.10529) | 12:06 | 27.4 |
| [Otmazgin et al. (2022)](https://arxiv.org/abs/2205.12644) | 06:43 | 4.6 |
| + Batching | 06:00 | 6.6 |
| [Kirstain et al. (2021)](https://arxiv.org/abs/2101.00434) | 04:37 | 4.4 |
| [Dobrovolskii (2021)](https://arxiv.org/abs/2109.04127) | 03:49 | 3.5 |
| [F-Coref](https://arxiv.org/abs/2209.04280) | 00:45 | 3.3 |
| + Batching | 00:35 | 4.5 |
| + Leftovers batching | 00:25 | 4.0 |

The inference time (Min:Sec) and memory (GiB) for each model on 2.8K documents, averaged over 3 runs. Hardware: NVIDIA Tesla V100 SXM2.

### Citation

```
@inproceedings{Otmazgin2022FcorefFA,
  title={F-coref: Fast, Accurate and Easy to Use Coreference Resolution},
  author={Shon Otmazgin and Arie Cattan and Yoav Goldberg},
  booktitle={AACL},
  year={2022}
}
```

[F-coref: Fast, Accurate and Easy to Use Coreference Resolution](https://aclanthology.org/2022.aacl-demo.6) (Otmazgin et al., AACL-IJCNLP 2022)
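For completeness, here is a minimal usage sketch of the `fastcoref` package this model ships with. This is our addition following the API shown in the official repository, which should be consulted for the current interface:

```python
# pip install fastcoref
from fastcoref import FCoref

model = FCoref(device='cuda:0')  # use device='cpu' if no GPU is available

preds = model.predict(
    texts=['We are so happy to see you using our coref package. This package is very fast!']
)
print(preds[0].get_clusters())
# e.g. [['We', 'our'], ['our coref package', 'This package']]
```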
ShashwatDash/finetuning-sentiment-model-3000-samples-bert-small
ShashwatDash
"2024-02-02T13:24:45Z"
95,197
0
transformers
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:prajjwal1/bert-small", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2024-02-02T13:18:41Z"
--- license: mit base_model: prajjwal1/bert-small tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples-bert-small results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples-bert-small This model is a fine-tuned version of [prajjwal1/bert-small](https://huggingface.co/prajjwal1/bert-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3681 - Accuracy: 0.82 - F1: 0.8138 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
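Since the card above documents only training, here is a minimal inference sketch (our addition; the example sentence is illustrative, and the label names depend on the model's config):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="ShashwatDash/finetuning-sentiment-model-3000-samples-bert-small",
)
print(classifier("This movie was surprisingly good!"))
# Expected shape: [{'label': ..., 'score': ...}]
```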
trl-internal-testing/dummy-GPT2-correct-vocab
trl-internal-testing
"2023-02-08T15:14:11Z"
95,091
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-02-08T15:12:33Z"
Entry not found
TheBloke/Mistral-7B-Instruct-v0.1-GGUF
TheBloke
"2023-12-09T16:09:28Z"
94,894
476
transformers
[ "transformers", "gguf", "mistral", "finetuned", "text-generation", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-09-27T17:49:54Z"
---
base_model: mistralai/Mistral-7B-Instruct-v0.1
inference: false
license: apache-2.0
model_creator: Mistral AI
model_name: Mistral 7B Instruct v0.1
model_type: mistral
pipeline_tag: text-generation
prompt_template: '<s>[INST]{prompt} [/INST]'
quantized_by: TheBloke
tags:
- finetuned
---

<!-- header start -->
<!-- 200823 -->
<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
    <div style="display: flex; flex-direction: column; align-items: flex-start;">
        <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p>
    </div>
    <div style="display: flex; flex-direction: column; align-items: flex-end;">
        <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p>
    </div>
</div>
<div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div>
<hr style="margin-top: 1.0em; margin-bottom: 1.0em;">
<!-- header end -->

# Mistral 7B Instruct v0.1 - GGUF
- Model creator: [Mistral AI](https://huggingface.co/mistralai)
- Original model: [Mistral 7B Instruct v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)

<!-- description start -->
## Description

This repo contains GGUF format model files for [Mistral AI's Mistral 7B Instruct v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1).

<!-- description end -->

<!-- README_GGUF.md-about-gguf start -->
### About GGUF

GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.

Here is an incomplete list of clients and libraries that are known to support GGUF:

* [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option.
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.
* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.
* [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection.
* [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.
<!-- README_GGUF.md-about-gguf end -->

<!-- repositories-available start -->
## Repositories available

* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-AWQ)
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
* [Mistral AI's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
<!-- repositories-available end -->

<!-- prompt-template start -->
## Prompt template: Mistral

```
<s>[INST] {prompt} [/INST]
```

<!-- prompt-template end -->

<!-- compatibility_gguf start -->
## Compatibility

These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)

They are also compatible with many third party UIs and libraries - please see the list at the top of this README.

Sequence length note: The model will work at sequence lengths of 4096, or lower. GGUF does not yet have support for the new sliding window sequence length mode, so longer sequence lengths are not supported.

## Explanation of quantisation methods
<details>
  <summary>Click to see details</summary>

The new methods available are:
* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw

Refer to the Provided Files table below to see what files use which methods, and how.
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [mistral-7b-instruct-v0.1.Q2_K.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q2_K.gguf) | Q2_K | 2 | 3.08 GB| 5.58 GB | smallest, significant quality loss - not recommended for most purposes | | [mistral-7b-instruct-v0.1.Q3_K_S.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q3_K_S.gguf) | Q3_K_S | 3 | 3.16 GB| 5.66 GB | very small, high quality loss | | [mistral-7b-instruct-v0.1.Q3_K_M.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q3_K_M.gguf) | Q3_K_M | 3 | 3.52 GB| 6.02 GB | very small, high quality loss | | [mistral-7b-instruct-v0.1.Q3_K_L.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q3_K_L.gguf) | Q3_K_L | 3 | 3.82 GB| 6.32 GB | small, substantial quality loss | | [mistral-7b-instruct-v0.1.Q4_0.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q4_0.gguf) | Q4_0 | 4 | 4.11 GB| 6.61 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [mistral-7b-instruct-v0.1.Q4_K_S.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q4_K_S.gguf) | Q4_K_S | 4 | 4.14 GB| 6.64 GB | small, greater quality loss | | [mistral-7b-instruct-v0.1.Q4_K_M.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf) | Q4_K_M | 4 | 4.37 GB| 6.87 GB | medium, balanced quality - recommended | | [mistral-7b-instruct-v0.1.Q5_0.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q5_0.gguf) | Q5_0 | 5 | 5.00 GB| 7.50 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [mistral-7b-instruct-v0.1.Q5_K_S.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q5_K_S.gguf) | Q5_K_S | 5 | 5.00 GB| 7.50 GB | large, low quality loss - recommended | | [mistral-7b-instruct-v0.1.Q5_K_M.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q5_K_M.gguf) | Q5_K_M | 5 | 5.13 GB| 7.63 GB | large, very low quality loss - recommended | | [mistral-7b-instruct-v0.1.Q6_K.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q6_K.gguf) | Q6_K | 6 | 5.94 GB| 8.44 GB | very large, extremely low quality loss | | [mistral-7b-instruct-v0.1.Q8_0.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q8_0.gguf) | Q8_0 | 8 | 7.70 GB| 10.20 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. 
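If you script downloads in Python, the same single-file fetch can also be done with the `huggingface_hub` library directly (a minimal sketch; it assumes `huggingface-hub` is installed, as in the CLI section below):

```python
# Sketch: download a single quantised GGUF file from this repo.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    filename="mistral-7b-instruct-v0.1.Q4_K_M.gguf",
    local_dir=".",  # save into the current directory
)
print(f"Downloaded to: {path}")
```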
The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/Mistral-7B-Instruct-v0.1-GGUF and below it, a specific filename to download, such as: mistral-7b-instruct-v0.1.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.1-GGUF mistral-7b-instruct-v0.1.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.1-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.1-GGUF mistral-7b-instruct-v0.1.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m mistral-7b-instruct-v0.1.Q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "<s>[INST]{prompt} [/INST]" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Sequence length can be 4096 or lower. Mistral's sliding window sequence length is not yet supported in llama.cpp, so do not use sequence lengths longer than 4096. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. ### How to load this model in Python code, using ctransformers I have not tested ctransformers with Mistral models. It may work, but will require that you set the `model_type` to `llama` for now, until ctransformers updates with specific support. 
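For `llama-cpp-python`, a minimal sketch looks like the following; it assumes a recent llama-cpp-python build with GGUF support and the Q4_K_M file already downloaded locally:

```python
from llama_cpp import Llama

# Set n_gpu_layers to the number of layers to offload to GPU; use 0 for CPU-only.
llm = Llama(
    model_path="./mistral-7b-instruct-v0.1.Q4_K_M.gguf",
    n_ctx=4096,  # Mistral's supported sequence length without sliding window
    n_gpu_layers=32,
)
output = llm("<s>[INST] Write a haiku about quantisation. [/INST]", max_tokens=128)
print(output["choices"][0]["text"])
```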
#### First install the package

Run one of the following commands, according to your system:

```shell
# Base ctransformers with no GPU acceleration
pip install ctransformers
# Or with CUDA GPU acceleration
pip install ctransformers[cuda]
# Or with AMD ROCm GPU acceleration (Linux only)
CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers
# Or with Metal GPU acceleration for macOS systems only
CT_METAL=1 pip install ctransformers --no-binary ctransformers
```

#### Simple ctransformers example code

```python
from ctransformers import AutoModelForCausalLM

# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
# If your ctransformers version predates Mistral support, try model_type="llama" instead (see the note above).
llm = AutoModelForCausalLM.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.1-GGUF", model_file="mistral-7b-instruct-v0.1.Q4_K_M.gguf", model_type="mistral", gpu_layers=50)

print(llm("AI is going to"))
```

## How to use with LangChain

Here are guides on using llama-cpp-python and ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

<!-- README_GGUF.md-how-to-run end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at:

[TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine-tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donors will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.

**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J.
Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov

Thank you to all my generous patrons and donors!

And thank you again to a16z for their generous grant.

<!-- footer end -->

<!-- original-model-card start -->
# Original model card: Mistral AI's Mistral 7B Instruct v0.1

# Model Card for Mistral-7B-Instruct-v0.1

The Mistral-7B-Instruct-v0.1 Large Language Model (LLM) is an instruct fine-tuned version of the [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) generative text model, fine-tuned on a variety of publicly available conversation datasets.

For full details of this model please read our [release blog post](https://mistral.ai/news/announcing-mistral-7b/).

## Instruction format

In order to leverage instruction fine-tuning, your prompt should be surrounded by `[INST]` and `[/INST]` tokens. The very first instruction should begin with a begin-of-sentence id. Subsequent instructions should not. The assistant generation will be ended by the end-of-sentence token id.

E.g.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # the device to load the model onto

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")

text = """<s>[INST] What is your favourite condiment? [/INST]
Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!</s> [INST] Do you have mayonnaise recipes? [/INST]"""

encodeds = tokenizer(text, return_tensors="pt", add_special_tokens=False)

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(**model_inputs, max_new_tokens=1000, do_sample=True)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
```

## Model Architecture

This instruction model is based on Mistral-7B-v0.1, a transformer model with the following architecture choices:
- Grouped-Query Attention
- Sliding-Window Attention
- Byte-fallback BPE tokenizer

## The Mistral AI Team

Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
<!-- original-model-card end -->
facebook/esm1v_t33_650M_UR90S_1
facebook
"2022-11-16T12:57:41Z"
94,814
2
transformers
[ "transformers", "pytorch", "tf", "esm", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2022-10-17T15:17:23Z"
Entry not found
jonatasgrosman/wav2vec2-large-xlsr-53-polish
jonatasgrosman
"2022-12-14T01:57:56Z"
94,427
4
transformers
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "hf-asr-leaderboard", "mozilla-foundation/common_voice_6_0", "pl", "robust-speech-event", "speech", "xlsr-fine-tuning-week", "dataset:common_voice", "dataset:mozilla-foundation/common_voice_6_0", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
"2022-03-02T23:29:05Z"
--- language: pl license: apache-2.0 datasets: - common_voice - mozilla-foundation/common_voice_6_0 metrics: - wer - cer tags: - audio - automatic-speech-recognition - hf-asr-leaderboard - mozilla-foundation/common_voice_6_0 - pl - robust-speech-event - speech - xlsr-fine-tuning-week model-index: - name: XLSR Wav2Vec2 Polish by Jonatas Grosman results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice pl type: common_voice args: pl metrics: - name: Test WER type: wer value: 14.21 - name: Test CER type: cer value: 3.49 - name: Test WER (+LM) type: wer value: 10.98 - name: Test CER (+LM) type: cer value: 2.93 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: pl metrics: - name: Dev WER type: wer value: 33.18 - name: Dev CER type: cer value: 15.92 - name: Dev WER (+LM) type: wer value: 29.31 - name: Dev CER (+LM) type: cer value: 15.17 --- # Fine-tuned XLSR-53 large model for speech recognition in Polish Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Polish using the train and validation splits of [Common Voice 6.1](https://huggingface.co/datasets/common_voice). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned thanks to the GPU credits generously given by the [OVHcloud](https://www.ovhcloud.com/en/public-cloud/ai-training/) :) The script used for training can be found here: https://github.com/jonatasgrosman/wav2vec2-sprint ## Usage The model can be used directly (without a language model) as follows... Using the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) library: ```python from huggingsound import SpeechRecognitionModel model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-large-xlsr-53-polish") audio_paths = ["/path/to/file.mp3", "/path/to/another_file.wav"] transcriptions = model.transcribe(audio_paths) ``` Writing your own inference script: ```python import torch import librosa from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor LANG_ID = "pl" MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-polish" SAMPLES = 5 test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]") processor = Wav2Vec2Processor.from_pretrained(MODEL_ID) model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000) batch["speech"] = speech_array batch["sentence"] = batch["sentence"].upper() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_sentences = processor.batch_decode(predicted_ids) for i, predicted_sentence in enumerate(predicted_sentences): print("-" * 100) print("Reference:", test_dataset[i]["sentence"]) print("Prediction:", predicted_sentence) ``` | Reference | Prediction | | ------------- | ------------- | | """CZY DRZWI BYŁY ZAMKNIĘTE?""" | PRZY DRZWI BYŁY ZAMKNIĘTE | | GDZIEŻ TU POWÓD DO WYRZUTÓW? 
| WGDZIEŻ TO POM DO WYRYDÓ | | """O TEM JEDNAK NIE BYŁO MOWY.""" | O TEM JEDNAK NIE BYŁO MOWY | | LUBIĘ GO. | LUBIĄ GO | | — TO MI NIE POMAGA. | TO MNIE NIE POMAGA | | WCIĄŻ LUDZIE WYSIADAJĄ PRZED ZAMKIEM, Z MIASTA, Z PRAGI. | WCIĄŻ LUDZIE WYSIADAJĄ PRZED ZAMKIEM Z MIASTA Z PRAGI | | ALE ON WCALE INACZEJ NIE MYŚLAŁ. | ONY MONITCENIE PONACZUŁA NA MASU | | A WY, CO TAK STOICIE? | A WY CO TAK STOICIE | | A TEN PRZYRZĄD DO CZEGO SŁUŻY? | A TEN PRZYRZĄD DO CZEGO SŁUŻY | | NA JUTRZEJSZYM KOLOKWIUM BĘDZIE PIĘĆ PYTAŃ OTWARTYCH I TEST WIELOKROTNEGO WYBORU. | NAJUTRZEJSZYM KOLOKWIUM BĘDZIE PIĘĆ PYTAŃ OTWARTYCH I TEST WIELOKROTNEGO WYBORU | ## Evaluation 1. To evaluate on `mozilla-foundation/common_voice_6_0` with split `test` ```bash python eval.py --model_id jonatasgrosman/wav2vec2-large-xlsr-53-polish --dataset mozilla-foundation/common_voice_6_0 --config pl --split test ``` 2. To evaluate on `speech-recognition-community-v2/dev_data` ```bash python eval.py --model_id jonatasgrosman/wav2vec2-large-xlsr-53-polish --dataset speech-recognition-community-v2/dev_data --config pl --split validation --chunk_length_s 5.0 --stride_length_s 1.0 ``` ## Citation If you want to cite this model you can use this: ```bibtex @misc{grosman2021xlsr53-large-polish, title={Fine-tuned {XLSR}-53 large model for speech recognition in {P}olish}, author={Grosman, Jonatas}, howpublished={\url{https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-polish}}, year={2021} } ```
upstage/SOLAR-10.7B-Instruct-v1.0
upstage
"2024-03-29T04:32:58Z"
94,422
569
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "en", "dataset:c-s-ale/alpaca-gpt4-data", "dataset:Open-Orca/OpenOrca", "dataset:Intel/orca_dpo_pairs", "dataset:allenai/ultrafeedback_binarized_cleaned", "arxiv:2312.15166", "arxiv:2403.19270", "base_model:upstage/SOLAR-10.7B-v1.0", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-12-12T12:39:22Z"
---
datasets:
- c-s-ale/alpaca-gpt4-data
- Open-Orca/OpenOrca
- Intel/orca_dpo_pairs
- allenai/ultrafeedback_binarized_cleaned
language:
- en
license: cc-by-nc-4.0
base_model:
- upstage/SOLAR-10.7B-v1.0
---

<p align="left">
<a href="https://go.upstage.ai/solar-obt-hf-modelcardv1-instruct">
<img src="https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0/resolve/main/solar-api-banner.png" width="100%"/>
</a>
</p>

# **Meet 10.7B Solar: Elevating Performance with Upstage Depth UP Scaling!**

**(This model is a version of [upstage/SOLAR-10.7B-v1.0](https://huggingface.co/upstage/SOLAR-10.7B-v1.0) fine-tuned for single-turn conversation.)**

# **Introduction**

We introduce SOLAR-10.7B, an advanced large language model (LLM) with 10.7 billion parameters, demonstrating superior performance in various natural language processing (NLP) tasks. It's compact, yet remarkably powerful, and demonstrates unparalleled state-of-the-art performance in models with parameters under 30B.

We present a methodology for scaling LLMs called depth up-scaling (DUS), which encompasses architectural modifications and continued pretraining. In other words, we integrated Mistral 7B weights into the upscaled layers, and finally, continued pre-training for the entire model.

SOLAR-10.7B has remarkable performance. It outperforms models with up to 30B parameters, even surpassing the recent Mixtral 8X7B model. For detailed information, please refer to the experimental table.

Solar 10.7B is an ideal choice for fine-tuning. SOLAR-10.7B offers robustness and adaptability for your fine-tuning needs. Our simple instruction fine-tuning using the SOLAR-10.7B pre-trained model yields significant performance improvements.

For full details of this model please read our [paper](https://arxiv.org/abs/2312.15166).

# **Instruction Fine-Tuning Strategy**

We utilize state-of-the-art instruction fine-tuning methods including supervised fine-tuning (SFT) and direct preference optimization (DPO) [1].

We used a mixture of the following datasets:
- c-s-ale/alpaca-gpt4-data (SFT)
- Open-Orca/OpenOrca (SFT)
- in-house generated data utilizing Metamath [2] (SFT, DPO)
- Intel/orca_dpo_pairs (DPO)
- allenai/ultrafeedback_binarized_cleaned (DPO)

where we took care to avoid data contamination by not using GSM8K samples when generating data, and by filtering tasks, when applicable, via the following list.

```python
filtering_task_list = [
    'task228_arc_answer_generation_easy',
    'ai2_arc/ARC-Challenge:1.0.0',
    'ai2_arc/ARC-Easy:1.0.0',
    'task229_arc_answer_generation_hard',
    'hellaswag:1.1.0',
    'task1389_hellaswag_completion',
    'cot_gsm8k',
    'cot_gsm8k_ii',
    'drop:2.0.0',
    'winogrande:1.1.0'
]
```

Using the datasets mentioned above, we applied SFT and iterative DPO training, a proprietary alignment strategy, to maximize the performance of our resulting model.

[1] Rafailov, R., Sharma, A., Mitchell, E., Ermon, S., Manning, C.D. and Finn, C., 2023. Direct preference optimization: Your language model is secretly a reward model. NeurIPS.

[2] Yu, L., Jiang, W., Shi, H., Yu, J., Liu, Z., Zhang, Y., Kwok, J.T., Li, Z., Weller, A. and Liu, W., 2023. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284.

# **Data Contamination Test Results**

Recently, there have been contamination issues in some models on the LLM leaderboard. We note that we made every effort to exclude any benchmark-related datasets from training.
We also ensured the integrity of our model by conducting a data contamination test [3] that is also used by the HuggingFace team [4, 5].

Our results, with `result < 0.1, %:` being well below 0.9, indicate that our model is free from contamination.

*The data contamination test results of HellaSwag and Winogrande will be added once [3] supports them.*

| Model | ARC | MMLU | TruthfulQA | GSM8K |
|------------------------------|-------|-------|-------|-------|
| **SOLAR-10.7B-Instruct-v1.0**| result < 0.1, %: 0.06 | result < 0.1, %: 0.15 | result < 0.1, %: 0.28 | result < 0.1, %: 0.70 |

[3] https://github.com/swj0419/detect-pretrain-code-contamination

[4] https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474#657f2245365456e362412a06

[5] https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/265#657b6debf81f6b44b8966230

# **Evaluation Results**

| Model | H6 | Model Size |
|----------------------------------------|-------|------------|
| **SOLAR-10.7B-Instruct-v1.0** | **74.20** | **~ 11B** |
| mistralai/Mixtral-8x7B-Instruct-v0.1 | 72.62 | ~ 46.7B |
| 01-ai/Yi-34B-200K | 70.81 | ~ 34B |
| 01-ai/Yi-34B | 69.42 | ~ 34B |
| mistralai/Mixtral-8x7B-v0.1 | 68.42 | ~ 46.7B |
| meta-llama/Llama-2-70b-hf | 67.87 | ~ 70B |
| tiiuae/falcon-180B | 67.85 | ~ 180B |
| **SOLAR-10.7B-v1.0** | **66.04** | **~ 11B** |
| mistralai/Mistral-7B-Instruct-v0.2 | 65.71 | ~ 7B |
| Qwen/Qwen-14B | 65.86 | ~ 14B |
| 01-ai/Yi-34B-Chat | 65.32 | ~ 34B |
| meta-llama/Llama-2-70b-chat-hf | 62.4 | ~ 70B |
| mistralai/Mistral-7B-v0.1 | 60.97 | ~ 7B |
| mistralai/Mistral-7B-Instruct-v0.1 | 54.96 | ~ 7B |

# **Usage Instructions**

This model has been fine-tuned primarily for single-turn conversation, making it less suitable for multi-turn conversations such as chat.

### **Version**

Make sure you have the correct version of the transformers library installed:

```sh
pip install transformers==4.35.2
```

### **Loading the Model**

Use the following Python code to load the model:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Upstage/SOLAR-10.7B-Instruct-v1.0")
model = AutoModelForCausalLM.from_pretrained(
    "Upstage/SOLAR-10.7B-Instruct-v1.0",
    device_map="auto",
    torch_dtype=torch.float16,
)
```

### **Conducting Single-Turn Conversation**

```python
conversation = [{'role': 'user', 'content': 'Hello?'}]

prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, use_cache=True, max_length=4096)
output_text = tokenizer.decode(outputs[0])
print(output_text)
```

Below is an example of the output.

```
<s> ### User:
Hello?

### Assistant:
Hello, how can I assist you today? Please feel free to ask any questions or request help with a specific task.</s>
```

### **License**

- [upstage/SOLAR-10.7B-v1.0](https://huggingface.co/upstage/SOLAR-10.7B-v1.0): apache-2.0
- [upstage/SOLAR-10.7B-Instruct-v1.0](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0): cc-by-nc-4.0
- Since some non-commercial datasets such as Alpaca are used for fine-tuning, we release this model as cc-by-nc-4.0.

### **How to Cite**

Please cite the following papers using the below format when using this model.
```bibtex
@misc{kim2023solar,
      title={SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scaling},
      author={Dahyun Kim and Chanjun Park and Sanghoon Kim and Wonsung Lee and Wonho Song and Yunsu Kim and Hyeonwoo Kim and Yungi Kim and Hyeonju Lee and Jihoo Kim and Changbae Ahn and Seonghoon Yang and Sukyung Lee and Hyunbyung Park and Gyoungjin Gim and Mikyoung Cha and Hwalsuk Lee and Sunghun Kim},
      year={2023},
      eprint={2312.15166},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

```bibtex
@misc{kim2024sdpo,
      title={sDPO: Don't Use Your Data All at Once},
      author={Dahyun Kim and Yungi Kim and Wonho Song and Hyeonwoo Kim and Yunsu Kim and Sanghoon Kim and Chanjun Park},
      year={2024},
      eprint={2403.19270},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

### **The Upstage AI Team** ###

Upstage is creating the best LLM and DocAI. Please find more information at https://upstage.ai

### **Contact Us** ###

For any questions and suggestions, please use the discussion tab. If you want to contact us directly, drop an email to [contact@upstage.ai](mailto:contact@upstage.ai)
dmis-lab/biobert-base-cased-v1.2
dmis-lab
"2021-06-24T02:54:58Z"
94,023
35
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
fill-mask
"2022-03-02T23:29:05Z"
Entry not found
Helsinki-NLP/opus-mt-en-zh
Helsinki-NLP
"2023-08-16T11:31:42Z"
93,894
273
transformers
[ "transformers", "pytorch", "tf", "jax", "rust", "marian", "text2text-generation", "translation", "en", "zh", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
translation
"2022-03-02T23:29:04Z"
--- language: - en - zh tags: - translation license: apache-2.0 --- ### eng-zho * source group: English * target group: Chinese * OPUS readme: [eng-zho](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-zho/README.md) * model: transformer * source language(s): eng * target language(s): cjy_Hans cjy_Hant cmn cmn_Hans cmn_Hant gan lzh lzh_Hans nan wuu yue yue_Hans yue_Hant * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID) * download original weights: [opus-2020-07-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-zho/opus-2020-07-17.zip) * test set translations: [opus-2020-07-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-zho/opus-2020-07-17.test.txt) * test set scores: [opus-2020-07-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-zho/opus-2020-07-17.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.eng.zho | 31.4 | 0.268 | ### System Info: - hf_name: eng-zho - source_languages: eng - target_languages: zho - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-zho/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['en', 'zh'] - src_constituents: {'eng'} - tgt_constituents: {'cmn_Hans', 'nan', 'nan_Hani', 'gan', 'yue', 'cmn_Kana', 'yue_Hani', 'wuu_Bopo', 'cmn_Latn', 'yue_Hira', 'cmn_Hani', 'cjy_Hans', 'cmn', 'lzh_Hang', 'lzh_Hira', 'cmn_Hant', 'lzh_Bopo', 'zho', 'zho_Hans', 'zho_Hant', 'lzh_Hani', 'yue_Hang', 'wuu', 'yue_Kana', 'wuu_Latn', 'yue_Bopo', 'cjy_Hant', 'yue_Hans', 'lzh', 'cmn_Hira', 'lzh_Yiii', 'lzh_Hans', 'cmn_Bopo', 'cmn_Hang', 'hak_Hani', 'cmn_Yiii', 'yue_Hant', 'lzh_Kana', 'wuu_Hani'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-zho/opus-2020-07-17.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-zho/opus-2020-07-17.test.txt - src_alpha3: eng - tgt_alpha3: zho - short_pair: en-zh - chrF2_score: 0.268 - bleu: 31.4 - brevity_penalty: 0.8959999999999999 - ref_len: 110468.0 - src_name: English - tgt_name: Chinese - train_date: 2020-07-17 - src_alpha2: en - tgt_alpha2: zh - prefer_old: False - long_pair: eng-zho - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
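A minimal usage sketch with the Transformers Marian classes; the `>>cmn_Hans<<` sentence-initial token is one valid target-language ID drawn from the `tgt_constituents` list above, and should be swapped for other target variants as needed:

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-zh"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# The sentence-initial token selects the target variant, as required above.
text = ">>cmn_Hans<< Machine translation is surprisingly effective these days."
batch = tokenizer([text], return_tensors="pt")
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```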
cross-encoder/nli-deberta-base
cross-encoder
"2021-08-05T08:40:53Z"
93,882
14
transformers
[ "transformers", "pytorch", "deberta", "text-classification", "deberta-base-base", "zero-shot-classification", "en", "dataset:multi_nli", "dataset:snli", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
zero-shot-classification
"2022-03-02T23:29:05Z"
---
language: en
pipeline_tag: zero-shot-classification
tags:
- deberta-base-base
datasets:
- multi_nli
- snli
metrics:
- accuracy
license: apache-2.0
---

# Cross-Encoder for Natural Language Inference

This model was trained using the [SentenceTransformers](https://sbert.net) [Cross-Encoder](https://www.sbert.net/examples/applications/cross-encoder/README.html) class.

## Training Data

The model was trained on the [SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/) datasets. For a given sentence pair, it will output three scores corresponding to the labels: contradiction, entailment, neutral.

## Performance

For evaluation results, see [SBERT.net - Pretrained Cross-Encoder](https://www.sbert.net/docs/pretrained_cross-encoders.html#nli).

## Usage

Pre-trained models can be used like this:

```python
from sentence_transformers import CrossEncoder

model = CrossEncoder('cross-encoder/nli-deberta-base')
scores = model.predict([('A man is eating pizza', 'A man eats something'), ('A black race car starts up in front of a crowd of people.', 'A man is driving down a lonely road.')])

# Convert scores to labels
label_mapping = ['contradiction', 'entailment', 'neutral']
labels = [label_mapping[score_max] for score_max in scores.argmax(axis=1)]
```

## Usage with Transformers AutoModel

You can also use the model directly with the Transformers library (without the SentenceTransformers library):

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model = AutoModelForSequenceClassification.from_pretrained('cross-encoder/nli-deberta-base')
tokenizer = AutoTokenizer.from_pretrained('cross-encoder/nli-deberta-base')

features = tokenizer(['A man is eating pizza', 'A black race car starts up in front of a crowd of people.'], ['A man eats something', 'A man is driving down a lonely road.'], padding=True, truncation=True, return_tensors="pt")

model.eval()
with torch.no_grad():
    scores = model(**features).logits
    label_mapping = ['contradiction', 'entailment', 'neutral']
    labels = [label_mapping[score_max] for score_max in scores.argmax(dim=1)]
    print(labels)
```

## Zero-Shot Classification

This model can also be used for zero-shot classification:

```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model='cross-encoder/nli-deberta-base')

sent = "Apple just announced the newest iPhone X"
candidate_labels = ["technology", "sports", "politics"]
res = classifier(sent, candidate_labels)
print(res)
```
SenseTime/deformable-detr
SenseTime
"2023-09-07T06:14:08Z"
93,794
11
transformers
[ "transformers", "pytorch", "safetensors", "deformable_detr", "object-detection", "vision", "dataset:coco", "arxiv:2010.04159", "license:apache-2.0", "endpoints_compatible", "has_space", "region:us" ]
object-detection
"2022-03-02T23:29:05Z"
---
license: apache-2.0
tags:
- object-detection
- vision
datasets:
- coco
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg
  example_title: Savanna
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
  example_title: Football Match
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg
  example_title: Airport
---

# Deformable DETR model with ResNet-50 backbone

Deformable DEtection TRansformer (DETR) model trained end-to-end on COCO 2017 object detection (118k annotated images). It was introduced in the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Zhu et al. and first released in [this repository](https://github.com/fundamentalvision/Deformable-DETR).

Disclaimer: The team releasing Deformable DETR did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

The DETR model is an encoder-decoder transformer with a convolutional backbone. Two heads are added on top of the decoder outputs in order to perform object detection: a linear layer for the class labels and an MLP (multi-layer perceptron) for the bounding boxes. The model uses so-called object queries to detect objects in an image. Each object query looks for a particular object in the image. For COCO, the number of object queries is set to 100.

The model is trained using a "bipartite matching loss": one compares the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a "no object" as class and "no bounding box" as bounding box). The Hungarian matching algorithm is used to create an optimal one-to-one mapping between each of the N queries and each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and generalized IoU loss (for the bounding boxes) are used to optimize the parameters of the model.

![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/deformable_detr_architecture.png)

## Intended uses & limitations

You can use the raw model for object detection. See the [model hub](https://huggingface.co/models?search=sensetime/deformable-detr) to look for all available Deformable DETR models.
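To make the bipartite matching described in the model description above concrete, here is a toy sketch using SciPy's Hungarian solver; the random costs and the 5.0 box-cost weight are illustrative assumptions, not the exact Deformable DETR loss configuration:

```python
# Toy sketch: match 5 object queries to 2 ground-truth boxes by minimal cost.
import numpy as np
from scipy.optimize import linear_sum_assignment

num_queries, num_targets = 5, 2
class_cost = np.random.rand(num_queries, num_targets)   # e.g. -log p(target class) per pair
box_l1_cost = np.random.rand(num_queries, num_targets)  # e.g. L1 distance between boxes
cost = class_cost + 5.0 * box_l1_cost                   # weighted combination (weight assumed)

query_idx, target_idx = linear_sum_assignment(cost)     # optimal one-to-one assignment
print(list(zip(query_idx, target_idx)))                 # which query matches which annotation
```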
### How to use Here is how to use this model: ```python from transformers import AutoImageProcessor, DeformableDetrForObjectDetection import torch from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr") inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) # convert outputs (bounding boxes and class logits) to COCO API # let's only keep detections with score > 0.7 target_sizes = torch.tensor([image.size[::-1]]) results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0] for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): box = [round(i, 2) for i in box.tolist()] print( f"Detected {model.config.id2label[label.item()]} with confidence " f"{round(score.item(), 3)} at location {box}" ) ``` This should output: ``` Detected cat with confidence 0.856 at location [342.19, 24.3, 640.02, 372.25] Detected remote with confidence 0.739 at location [40.79, 72.78, 176.76, 117.25] Detected cat with confidence 0.859 at location [16.5, 52.84, 318.25, 470.78] ``` Currently, both the feature extractor and model support PyTorch. ## Training data The Deformable DETR model was trained on [COCO 2017 object detection](https://cocodataset.org/#download), a dataset consisting of 118k/5k annotated images for training/validation respectively. ### BibTeX entry and citation info ```bibtex @misc{https://doi.org/10.48550/arxiv.2010.04159, doi = {10.48550/ARXIV.2010.04159}, url = {https://arxiv.org/abs/2010.04159}, author = {Zhu, Xizhou and Su, Weijie and Lu, Lewei and Li, Bin and Wang, Xiaogang and Dai, Jifeng}, keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Deformable DETR: Deformable Transformers for End-to-End Object Detection}, publisher = {arXiv}, year = {2020}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
cagliostrolab/animagine-xl-3.0
cagliostrolab
"2024-01-17T17:20:41Z"
93,746
709
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "stable-diffusion-xl", "en", "base_model:Linaqruf/animagine-xl-2.0", "license:other", "endpoints_compatible", "has_space", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2023-11-23T03:57:11Z"
---
license: other
license_name: faipl-1.0-sd
license_link: https://freedevproject.org/faipl-1.0-sd/
language:
- en
tags:
- text-to-image
- stable-diffusion
- safetensors
- stable-diffusion-xl
base_model: Linaqruf/animagine-xl-2.0
widget:
- text: 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality
  parameter:
    negative_prompt: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name
  example_title: 1girl
- text: 1boy, male focus, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality
  parameter:
    negative_prompt: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name
  example_title: 1boy
---

<style>
  .title-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh; /* Adjust this value to position the title vertically */
  }

  .title {
    font-size: 2.5em;
    text-align: center;
    color: #333;
    font-family: 'Helvetica Neue', sans-serif;
    text-transform: uppercase;
    letter-spacing: 0.1em;
    padding: 0.5em 0;
    background: transparent;
  }

  .title span {
    background: -webkit-linear-gradient(45deg, #7ed56f, #28b485);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
  }

  .custom-table {
    table-layout: fixed;
    width: 100%;
    border-collapse: collapse;
    margin-top: 2em;
  }

  .custom-table td {
    width: 50%;
    vertical-align: top;
    padding: 10px;
    box-shadow: 0px 0px 0px 0px rgba(0, 0, 0, 0.15);
  }

  .custom-image-container {
    position: relative;
    width: 100%;
    margin-bottom: 0em;
    overflow: hidden;
    border-radius: 10px;
    transition: transform .7s; /* Smooth transition for the container */
  }

  .custom-image-container:hover {
    transform: scale(1.05); /* Scale the container on hover */
  }

  .custom-image {
    width: 100%;
    height: auto;
    object-fit: cover;
    border-radius: 10px;
    transition: transform .7s;
    margin-bottom: 0em;
  }

  .nsfw-filter {
    filter: blur(8px); /* Apply a blur effect */
    transition: filter 0.3s ease; /* Smooth transition for the blur effect */
  }

  .custom-image-container:hover .nsfw-filter {
    filter: none; /* Remove the blur effect on hover */
  }

  .overlay {
    position: absolute;
    bottom: 0;
    left: 0;
    right: 0;
    color: white;
    width: 100%;
    height: 40%;
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
    font-size: 1vw;
    font-weight: bold;
    text-align: center;
    opacity: 0; /* Hidden by default; revealed on hover below */
    background: linear-gradient(0deg, rgba(0, 0, 0, 0.8) 60%, rgba(0, 0, 0, 0) 100%);
    transition: opacity .5s;
  }

  .custom-image-container:hover .overlay {
    opacity: 1; /* Reveal the overlay on hover */
  }

  .overlay-text {
    background: linear-gradient(45deg, #7ed56f, #28b485);
    -webkit-background-clip: text;
    color: transparent; /* Fallback for browsers that do not support this effect */
    text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.7); /* Enhanced text shadow for better legibility */
  }

  .overlay-subtext {
    font-size: 0.75em;
    margin-top: 0.5em;
    font-style: italic;
  }

  .overlay, .overlay-subtext {
    text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.5);
  }
</style>

<h1 class="title">
  <span>Animagine XL 3.0</span>
</h1>

<table class="custom-table">
  <tr>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/ep_oy_NVSMQaU162w8Gwp.png" alt="sample1">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/FGFZgsqrhOcor5mid5eap.png" alt="sample4">
      </div>
    </td>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/EuvINvBsCKZQuspZHN-uF.png" alt="sample2">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/yyRqdHJfePKl7ytB6ieX9.png" alt="sample3">
      </div>
    </td>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/2oWmFh728T0hzEkUtSmgy.png" alt="sample1">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/3yaZxWkUOenZSSNtGQR_3.png" alt="sample4">
      </div>
    </td>
  </tr>
</table>

**Animagine XL 3.0** is the latest version of the sophisticated open-source anime text-to-image model, building upon the capabilities of its predecessor, Animagine XL 2.0. Developed based on Stable Diffusion XL, this iteration boasts superior image generation with notable improvements in hand anatomy, efficient tag ordering, and enhanced knowledge about anime concepts. Unlike the previous iteration, we focused on making the model learn concepts rather than aesthetics.

## Model Details

- **Developed by**: [Cagliostro Research Lab](https://huggingface.co/cagliostrolab)
- **Model type**: Diffusion-based text-to-image generative model
- **Model Description**: Animagine XL 3.0 is engineered to generate high-quality anime images from textual prompts. It features enhanced hand anatomy, better concept understanding, and prompt interpretation, making it the most advanced model in its series.
- **License**: [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/)
- **Finetuned from model**: [Animagine XL 2.0](https://huggingface.co/Linaqruf/animagine-xl-2.0)

## Gradio & Colab Integration

Animagine XL 3.0 is accessible through user-friendly platforms such as Gradio and Google Colab:

- **Gradio Web UI**: [Open In Spaces](https://huggingface.co/spaces/Linaqruf/Animagine-XL)
- **Google Colab**: [Open In Colab](https://colab.research.google.com/#fileId=https%3A//huggingface.co/Linaqruf/animagine-xl/blob/main/Animagine_XL_demo.ipynb)

## 🧨 Diffusers Installation

To use Animagine XL 3.0, install the required libraries as follows:

```bash
pip install diffusers --upgrade
pip install transformers accelerate safetensors
```

Example script for generating images with Animagine XL 3.0:

```python
import torch
from diffusers import (
    StableDiffusionXLPipeline,
    EulerAncestralDiscreteScheduler,
    AutoencoderKL
)

# Load VAE component
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16
)

# Configure the pipeline
pipe = StableDiffusionXLPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to('cuda')

# Define prompts and generate image
prompt = "1girl, arima kana, oshi no ko, solo, upper body, v, smile, looking at viewer, outdoors, night"
negative_prompt = "nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    width=832,
    height=1216,
    guidance_scale=7,
    num_inference_steps=28
).images[0]
```

## Usage Guidelines

### Tag Ordering

Prompting is a bit different in this iteration. For optimal results, it's recommended to follow the structured prompt template below, since this is how the model was trained:

```
1girl/1boy, character name, from what series, everything else in any order.
```

## Special Tags

Like the previous iteration, this model was trained with some special tags to steer the result toward quality, rating, and when the posts were created. The model can still do the job without these special tags, but using them is recommended to make the model easier to handle.

### Quality Modifiers

| Quality Modifier | Score Criterion |
| ---------------- | --------------- |
| `masterpiece`    | >150            |
| `best quality`   | 100-150         |
| `high quality`   | 75-100          |
| `medium quality` | 25-75           |
| `normal quality` | 0-25            |
| `low quality`    | -5-0            |
| `worst quality`  | <-5             |

### Rating Modifiers

| Rating Modifier                | Rating Criterion          |
| ------------------------------| ------------------------- |
| `rating: general`              | General                   |
| `rating: sensitive`            | Sensitive                 |
| `rating: questionable`, `nsfw` | Questionable              |
| `rating: explicit`, `nsfw`     | Explicit                  |

### Year Modifier

These tags help to steer the result toward modern or vintage anime art styles, ranging from `newest` to `oldest`.
| Year Tag | Year Range       |
| -------- | ---------------- |
| `newest` | 2022 to 2023     |
| `late`   | 2019 to 2021     |
| `mid`    | 2015 to 2018     |
| `early`  | 2011 to 2014     |
| `oldest` | 2005 to 2010     |

## Recommended settings

To guide the model towards generating high-aesthetic images, use negative prompts like:

```
nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name
```

For higher quality outcomes, prepend prompts with:

```
masterpiece, best quality
```

However, be careful when using `masterpiece` and `best quality`, because many high-scoring images in the training data are NSFW. It's better to add `nsfw`, `rating: sensitive` to the negative prompt and `rating: general` to the positive prompt. It's also recommended to use a lower classifier-free guidance (CFG Scale) of around 5-7, sampling steps below 30, and Euler Ancestral (Euler a) as the sampler.

### Multi Aspect Resolution

This model supports generating images at the following dimensions:

| Dimensions        | Aspect Ratio    |
|-------------------|-----------------|
| `1024 x 1024`     | 1:1 Square      |
| `1152 x 896`      | 9:7             |
| `896 x 1152`      | 7:9             |
| `1216 x 832`      | 19:13           |
| `832 x 1216`      | 13:19           |
| `1344 x 768`      | 7:4 Horizontal  |
| `768 x 1344`      | 4:7 Vertical    |
| `1536 x 640`      | 12:5 Horizontal |
| `640 x 1536`      | 5:12 Vertical   |

## Training and Hyperparameters

- **Animagine XL 3.0** was trained on 2x A100 GPUs with 80GB memory for 21 days (over 500 GPU hours). The training process encompassed three stages:
  - Base:
    - **Feature Alignment Stage**: Utilized 1.2m images to acquaint the model with basic anime concepts.
    - **Refining UNet Stage**: Employed 2.5k curated datasets to fine-tune only the UNet.
  - Curated:
    - **Aesthetic Tuning Stage**: Employed 3.5k high-quality curated datasets to refine the model's art style.

### Hyperparameters

| Stage                       | Epochs | UNet Learning Rate | Train Text Encoder | Text Encoder Learning Rate | Batch Size | Mixed Precision | Noise Offset |
|-----------------------------|--------|--------------------|--------------------|----------------------------|------------|-----------------|--------------|
| **Feature Alignment Stage** | 10     | 7.5e-6             | True               | 3.75e-6                    | 48 x 2     | fp16            | N/A          |
| **Refining UNet Stage**     | 10     | 2e-6               | False              | N/A                        | 48         | fp16            | 0.0357       |
| **Aesthetic Tuning Stage**  | 10     | 1e-6               | False              | N/A                        | 48         | fp16            | 0.0357       |

## Model Comparison

### Training Config

| Configuration Item    | Animagine XL 2.0        | Animagine 3.0           |
|-----------------------|-------------------------|-------------------------|
| **GPU**               | A100 80G                | 2 x A100 80G            |
| **Dataset**           | 170k + 83k images       | 1271990 + 3500 Images   |
| **Shuffle Separator** | N/A                     | True                    |
| **Global Epochs**     | 20                      | 20                      |
| **Learning Rate**     | 1e-6                    | 7.5e-6                  |
| **Batch Size**        | 32                      | 48 x 2                  |
| **Train Text Encoder**| True                    | True                    |
| **Train Special Tags**| True                    | True                    |
| **Image Resolution**  | 1024                    | 1024                    |
| **Bucket Resolution** | 2048 x 512              | 2048 x 512              |

Source code and training config are available here: https://github.com/cagliostrolab/sd-scripts/tree/main/notebook

## Limitations

While "Animagine XL 3.0" represents a significant advancement in anime text-to-image generation, it's important to acknowledge its limitations to understand its best use cases and potential areas for future improvement.

1.
**Concept Over Artstyle Focus**: The model prioritizes learning concepts rather than specific art styles, which might lead to variations in aesthetic appeal compared to its predecessor.
2. **Non-Photorealistic Design**: Animagine XL 3.0 is not designed for generating photorealistic or realistic images, focusing instead on anime-style artwork.
3. **Anatomical Challenges**: Despite improvements, the model can still struggle with complex anatomical structures, particularly in dynamic poses, resulting in occasional inaccuracies.
4. **Dataset Limitations**: The training dataset of 1.2 million images may not encompass all anime characters or series, limiting the model's ability to generate lesser-known or newer characters.
5. **Natural Language Processing**: The model is not optimized for interpreting natural language, requiring more structured and specific prompts for best results.
6. **NSFW Content Risk**: Using high-quality tags like 'masterpiece' or 'best quality' carries a risk of generating NSFW content inadvertently, due to the prevalence of such images in high-scoring training datasets.

These limitations highlight areas for potential refinement in future iterations and underscore the importance of careful prompt crafting for optimal results. Understanding these constraints can help users better navigate the model's capabilities and tailor their expectations accordingly.

## Acknowledgements

We extend our gratitude to the entire team and community that contributed to the development of Animagine XL 3.0, including our partners and collaborators who provided resources and insights crucial for this iteration.

- **Main:** For the open source grant supporting our research, thank you so much.
- **Cagliostro Lab Collaborators:** For helping with quality checking during pretraining and curating datasets during fine-tuning.
- **Kohya SS:** For providing the essential training script and merging our PR about `keep_tokens_separator`, or the Shuffle Separator.
- **Camenduru Server Community:** For invaluable insights, support, and quality checking.
- **NovelAI:** For inspiring how to build the datasets and label them using tag ordering.

## Collaborators

- [Linaqruf](https://huggingface.co/Linaqruf)
- [DamarJati](https://huggingface.co/DamarJati)
- [Asahina2K](https://huggingface.co/Asahina2K)
- [ItsMeBell](https://huggingface.co/ItsMeBell)
- [Zwicky18](https://huggingface.co/Zwicky18)
- [NekoFi](https://huggingface.co/NekoFi)
- [Scipius2121](https://huggingface.co/Scipius2121)
- [Raelina](https://huggingface.co/Raelina)

## License

Animagine XL 3.0 now uses the [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/), compatible with Stable Diffusion models. Key points:

1. **Modification Sharing:** If you modify Animagine XL 3.0, you must share both your changes and the original license.
2. **Source Code Accessibility:** If your modified version is network-accessible, provide a way (like a download link) for others to get the source code. This applies to derived models too.
3. **Distribution Terms:** Any distribution must be under this license or another with similar rules.
4. **Compliance:** Non-compliance must be fixed within 30 days to avoid license termination, emphasizing transparency and adherence to open-source values.

The choice of this license aims to keep Animagine XL 3.0 open and modifiable, aligning with the open-source community spirit. It protects contributors and users, encouraging a collaborative, ethical open-source community.
This ensures the model not only benefits from communal input but also respects open-source development freedoms.
meta-llama/Llama-2-70b-chat-hf
meta-llama
"2024-03-18T22:20:09Z"
93,464
2,074
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "facebook", "meta", "llama-2", "conversational", "en", "arxiv:2307.09288", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
text-generation
"2023-07-14T18:02:07Z"
--- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. 
The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. USE POLICY ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [LlamaUseReport@meta.com](mailto:LlamaUseReport@meta.com) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 70B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. 
||Training Data|Params|Content Length|GQA|Tokens|LR|
|---|---|---|---|---|---|---|
|Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>|

*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch size of 4M tokens. The bigger model (70B) uses Grouped-Query Attention (GQA) for improved inference scalability.

**Model Dates** Llama 2 was trained between January 2023 and July 2023.

**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.

**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)

**Research Paper** ["Llama 2: Open Foundation and Fine-Tuned Chat Models"](https://arxiv.org/abs/2307.09288)

## Intended Use

**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.

**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.

## Hardware and Software

**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.

**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta's sustainability program.

||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)|
|---|---|---|---|
|Llama 2 7B|184320|400|31.22|
|Llama 2 13B|368640|400|62.44|
|Llama 2 70B|1720320|400|291.42|
|Total|3311616||539.00|

**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used, adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.

## Training Data

**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.

**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.

## Evaluation Results

In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.
|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
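A minimal chat sketch with Hugging Face Transformers (our addition, not part of the official card): it assumes gated access to the weights has already been granted, a recent `transformers` release that supports `apply_chat_template`, and enough GPU memory for the 70B checkpoint — in practice you would add quantization or pick a smaller variant.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "meta-llama/Llama-2-70b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # half precision; the 70B model still spans multiple GPUs
    device_map="auto",
)

# The chat template applies Llama 2's [INST] formatting for you.
messages = [{"role": "user", "content": "Explain RLHF in two sentences."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128)
# Decode only the newly generated tokens, not the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```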
Daoguang/PyCodeGPT
Daoguang
"2023-01-04T10:21:24Z"
93,377
5
transformers
[ "transformers", "pytorch", "gpt_neo", "text-generation", "license:afl-3.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-generation
"2023-01-04T10:09:30Z"
---
license: afl-3.0
---

# PyCodeGPT

A pre-trained GPT model for Python code completion and generation

## What is it?

PyCodeGPT is an efficient and effective GPT-Neo-based model for the Python code generation task, similar to [OpenAI Codex](https://openai.com/blog/openai-codex/), [GitHub Copilot](https://copilot.github.com/), [CodeParrot](https://huggingface.co/blog/codeparrot), and [AlphaCode](https://deepmind.com/blog/article/Competitive-programming-with-AlphaCode). A usage sketch is given after the evaluation results below.

## Training Data

Due to the small size of publicly released datasets, we collected data from GitHub from scratch. We first crawled 1.2M Python-related repositories hosted on GitHub. Then, we used these repository URLs to download all contents of each repository from GitHub. After that, we obtained 60M raw Python files under 1MB, with a total size of 330GB. Finally, we carefully designed various data-cleaning strategies to get about 96GB of data for training. Please refer to the following table for details.

|Model|Repositories|Size and files after filtering|
|:------:|:---:|:---:|
| CodeParrot | 0.56M | 12GB (compressed), 5.4M |
| Codex | 54M | 159GB |
| PyCodeGPT | 1.2M | 96GB, 13M |

## Pretrained models

We aim to train medium-sized pre-trained models (about 110M parameters) based on GPT-Neo:
- PyCodeGPT-110M: derived from GPT-Neo 125M with a vocabulary size of 32K.

## GitHub

[https://github.com/microsoft/PyCodeGPT](https://github.com/microsoft/PyCodeGPT)

## Evaluation Results

Here are our evaluation results on the HumanEval dataset. Note: our model achieves accuracy comparable to Codex models of similar size.

|Model|Pass@1|Pass@10|Pass@100|
|:------:|:---:|:---:|:---:|
|PyCodeGPT-110M |**8.32%** |**13.53%** |**18.3%** |
|||||
|GPT-Neo 125M |0.75% |1.88% |2.97% |
|GPT-Neo 1.3B |4.97% |7.47% |16.3% |
|GPT-Neo 2.7B |6.41% |11.27% |21.37% |
|GPT-J 6B |11.62% |15.74% |27.74% |
|||||
|TabNine |2.58% |4.35% |7.59% |
|||||
|CodeParrot 110M |3.80% |6.57% |12.78% |
|CodeParrot 1.5B |3.58% |8.03% |14.96% |
|||||
|Codex 12M |2.00% |3.62% |8.58% |
|Codex 25M |3.21% |7.1% |12.89% |
|Codex 42M |5.06% |8.8% |15.55% |
|Codex 85M |8.22% |12.81% |22.4% |
|Codex 300M |13.17% |20.37% |36.27% |
|Codex 679M |16.22% |25.7% |40.95% |
|Codex 2.5B |21.36% |35.42% |59.5% |
|Codex 12B |28.81% |46.81% |72.31% |
|||||
|Pretrained Decoder-only 13M (AlphaCode) |1.5% |3.6% |8.6% |
|Pretrained Decoder-only 29M (AlphaCode) |3.4% |5.8% |11.2% |
|Pretrained Decoder-only 55M (AlphaCode) |4.2% |8.2% |16.9% |
|Pretrained Decoder-only 89M (AlphaCode) |4.3% |12.2% |20.0% |
|Pretrained Decoder-only 302M (AlphaCode) |11.6% |18.8% |31.8% |
|Pretrained Decoder-only 685M (AlphaCode) |14.2% |24.4% |38.8% |
|Pretrained Decoder-only 1.1B (AlphaCode) |17.1% |28.2% |45.3% |
|||||
|PolyCoder 160M |2.13% |3.35% |4.88% |
|PolyCoder 400M |2.96% |5.29% |11.59% |
|PolyCoder 2.7B |5.59% |9.84% |17.68% |

## Reference

If you use the models, please cite the following paper:

```
@inproceedings{CERT,
  title={{CERT}: Continual Pre-training on Sketches for Library-oriented Code Generation},
  author={Zan, Daoguang and Chen, Bei and Yang, Dejian and Lin, Zeqi and Kim, Minsu and Guan, Bei and Wang, Yongji and Chen, Weizhu and Lou, Jian-Guang},
  booktitle={The 2022 International Joint Conference on Artificial Intelligence},
  year={2022}
}
```
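A minimal completion sketch (our addition, not from the original card): the checkpoint is GPT-Neo-based, so it should load with the standard `transformers` causal-LM classes; the prompt and sampling parameters below are illustrative assumptions, not recommended settings.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Daoguang/PyCodeGPT")
model = AutoModelForCausalLM.from_pretrained("Daoguang/PyCodeGPT")

# Complete a Python function from its signature and docstring.
prompt = 'def fibonacci(n):\n    """Return the n-th Fibonacci number."""\n'
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=64,                    # illustrative completion budget
    do_sample=True,                       # sampling often works better than greedy for code
    temperature=0.2,
    pad_token_id=tokenizer.eos_token_id,  # GPT-Neo has no dedicated pad token
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```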
microsoft/trocr-large-handwritten
microsoft
"2023-01-24T16:57:33Z"
93,372
60
transformers
[ "transformers", "pytorch", "vision-encoder-decoder", "trocr", "image-to-text", "arxiv:2109.10282", "endpoints_compatible", "has_space", "region:us" ]
image-to-text
"2022-03-02T23:29:05Z"
---
tags:
- trocr
- image-to-text
widget:
- src: https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg
  example_title: Note 1
- src: https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSoolxi9yWGAT5SLZShv8vVd0bz47UWRzQC19fDTeE8GmGv_Rn-PCF1pP1rrUx8kOjA4gg&usqp=CAU
  example_title: Note 2
- src: https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRNYtTuSBpZPV_nkBYPMFwVVD9asZOPgHww4epu9EqWgDmXW--sE2o8og40ZfDGo87j5w&usqp=CAU
  example_title: Note 3
---

# TrOCR (large-sized model, fine-tuned on IAM)

TrOCR model fine-tuned on the [IAM dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database). It was introduced in the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Li et al. and first released in [this repository](https://github.com/microsoft/unilm/tree/master/trocr).

Disclaimer: The team releasing TrOCR did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

The TrOCR model is an encoder-decoder model, consisting of an image Transformer as encoder, and a text Transformer as decoder. The image encoder was initialized from the weights of BEiT, while the text decoder was initialized from the weights of RoBERTa.

Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder. Next, the Transformer text decoder autoregressively generates tokens.

## Intended uses & limitations

You can use the raw model for optical character recognition (OCR) on single text-line images. See the [model hub](https://huggingface.co/models?search=microsoft/trocr) to look for fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model in PyTorch:

```python
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image
import requests

# load image from the IAM database
url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

processor = TrOCRProcessor.from_pretrained('microsoft/trocr-large-handwritten')
model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-large-handwritten')
pixel_values = processor(images=image, return_tensors="pt").pixel_values

generated_ids = model.generate(pixel_values)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_text)  # the recognized text for the input line image
```

### BibTeX entry and citation info

```bibtex
@misc{li2021trocr,
      title={TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models},
      author={Minghao Li and Tengchao Lv and Lei Cui and Yijuan Lu and Dinei Florencio and Cha Zhang and Zhoujun Li and Furu Wei},
      year={2021},
      eprint={2109.10282},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
cardiffnlp/tweet-topic-21-multi
cardiffnlp
"2023-05-28T04:56:09Z"
93,334
58
transformers
[ "transformers", "pytorch", "tf", "roberta", "text-classification", "en", "dataset:cardiffnlp/tweet_topic_multi", "arxiv:2209.09824", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-06-06T14:52:42Z"
--- language: en widget: - text: It is great to see athletes promoting awareness for climate change. datasets: - cardiffnlp/tweet_topic_multi license: mit metrics: - f1 - accuracy pipeline_tag: text-classification --- # tweet-topic-21-multi This model is based on a [TimeLMs](https://github.com/cardiffnlp/timelms) language model trained on ~124M tweets from January 2018 to December 2021 (see [here](https://huggingface.co/cardiffnlp/twitter-roberta-base-2021-124m)), and finetuned for multi-label topic classification on a corpus of 11,267 [tweets](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi). This model is suitable for English. - Reference Paper: [TweetTopic](https://arxiv.org/abs/2209.09824) (COLING 2022). <b>Labels</b>: | <span style="font-weight:normal">0: arts_&_culture</span> | <span style="font-weight:normal">5: fashion_&_style</span> | <span style="font-weight:normal">10: learning_&_educational</span> | <span style="font-weight:normal">15: science_&_technology</span> | |-----------------------------|---------------------|----------------------------|--------------------------| | 1: business_&_entrepreneurs | 6: film_tv_&_video | 11: music | 16: sports | | 2: celebrity_&_pop_culture | 7: fitness_&_health | 12: news_&_social_concern | 17: travel_&_adventure | | 3: diaries_&_daily_life | 8: food_&_dining | 13: other_hobbies | 18: youth_&_student_life | | 4: family | 9: gaming | 14: relationships | | ## Full classification example ```python from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification from transformers import AutoTokenizer import numpy as np from scipy.special import expit MODEL = f"cardiffnlp/tweet-topic-21-multi" tokenizer = AutoTokenizer.from_pretrained(MODEL) # PT model = AutoModelForSequenceClassification.from_pretrained(MODEL) class_mapping = model.config.id2label text = "It is great to see athletes promoting awareness for climate change." tokens = tokenizer(text, return_tensors='pt') output = model(**tokens) scores = output[0][0].detach().numpy() scores = expit(scores) predictions = (scores >= 0.5) * 1 # TF #tf_model = TFAutoModelForSequenceClassification.from_pretrained(MODEL) #class_mapping = tf_model.config.id2label #text = "It is great to see athletes promoting awareness for climate change." #tokens = tokenizer(text, return_tensors='tf') #output = tf_model(**tokens) #scores = output[0][0] #scores = expit(scores) #predictions = (scores >= 0.5) * 1 # Map to classes for i in range(len(predictions)): if predictions[i]: print(class_mapping[i]) ``` Output: ``` news_&_social_concern sports ``` ### BibTeX entry and citation info Please cite the [reference paper](https://aclanthology.org/2022.coling-1.299/) if you use this model. ```bibtex @inproceedings{antypas-etal-2022-twitter, title = "{T}witter Topic Classification", author = "Antypas, Dimosthenis and Ushio, Asahi and Camacho-Collados, Jose and Silva, Vitor and Neves, Leonardo and Barbieri, Francesco", booktitle = "Proceedings of the 29th International Conference on Computational Linguistics", month = oct, year = "2022", address = "Gyeongju, Republic of Korea", publisher = "International Committee on Computational Linguistics", url = "https://aclanthology.org/2022.coling-1.299", pages = "3386--3400" } ```
Qwen/Qwen1.5-7B-Chat-GGUF
Qwen
"2024-04-09T16:49:52Z"
93,302
29
null
[ "gguf", "chat", "text-generation", "en", "license:other", "region:us" ]
text-generation
"2024-02-03T11:53:25Z"
---
license: other
license_name: tongyi-qianwen
license_link: https://huggingface.co/Qwen/Qwen1.5-7B-Chat-GGUF/blob/main/LICENSE
language:
- en
pipeline_tag: text-generation
tags:
- chat
---

# Qwen1.5-7B-Chat-GGUF

## Introduction

Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previously released Qwen, the improvements include:

* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;
* Significant performance improvement in human preference for chat models;
* Multilingual support of both base and chat models;
* Stable support of 32K context length for models of all sizes;
* No need for `trust_remote_code`.

For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5).

In this repo, we provide quantized models in the GGUF format, including `q2_k`, `q3_k_m`, `q4_0`, `q4_k_m`, `q5_0`, `q5_k_m`, `q6_k` and `q8_0`. To demonstrate their model quality, we follow [`llama.cpp`](https://github.com/ggerganov/llama.cpp) and evaluate their perplexity on the wiki test set. Results are shown below:

|Size | fp16 | q8_0 | q6_k | q5_k_m | q5_0 | q4_k_m | q4_0 | q3_k_m | q2_k |
|--------|---------|---------|---------|---------|---------|---------|---------|---------|---------|
|0.5B | 34.20 | 34.22 | 34.31 | 33.80 | 34.02 | 34.27 | 36.74 | 38.25 | 62.14 |
|1.8B | 15.99 | 15.99 | 15.99 | 16.09 | 16.01 | 16.22 | 16.54 | 17.03 | 19.99 |
|4B | 13.20 | 13.21 | 13.28 | 13.24 | 13.27 | 13.61 | 13.44 | 13.67 | 15.65 |
|7B | 14.21 | 14.24 | 14.35 | 14.32 | 14.12 | 14.35 | 14.47 | 15.11 | 16.57 |
|14B | 10.91 | 10.91 | 10.93 | 10.98 | 10.88 | 10.92 | 10.92 | 11.24 | 12.27 |
|32B | 8.87 | 8.89 | 8.91 | 8.94 | 8.93 | 8.96 | 9.17 | 9.14 | 10.51 |
|72B | 7.97 | 7.99 | 7.99 | 7.99 | 8.01 | 8.00 | 8.01 | 8.06 | 8.63 |

## Model Details

Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and code. For the beta version, we temporarily did not include GQA (except for 32B) or the mixture of SWA and full attention.

## Training details

We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization.

## Requirements

We advise you to clone [`llama.cpp`](https://github.com/ggerganov/llama.cpp) and install it following the official guide.

## How to use

Cloning the repo may be inefficient; instead, you can manually download the GGUF file that you need or use `huggingface-cli` (`pip install huggingface_hub`) as shown below:

```shell
huggingface-cli download Qwen/Qwen1.5-7B-Chat-GGUF qwen1_5-7b-chat-q5_k_m.gguf --local-dir . --local-dir-use-symlinks False
```

We demonstrate how to use `llama.cpp` to run Qwen1.5:

```shell
./main -m qwen1_5-7b-chat-q5_k_m.gguf -n 512 --color -i -cml -f prompts/chat-with-qwen.txt
```

## Citation

If you find our work helpful, feel free to give us a cite.
``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
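For programmatic use, here is a hedged sketch using the community `llama-cpp-python` bindings (`pip install llama-cpp-python`) — this is not covered by the original card, and the local file name below assumes the `q5_k_m` download from the snippet above.

```python
from llama_cpp import Llama

# Load a quantized Qwen1.5 chat GGUF file (path assumed from the download step above).
llm = Llama(
    model_path="qwen1_5-7b-chat-q5_k_m.gguf",
    n_ctx=4096,           # context window to allocate; the model supports up to 32K
    chat_format="chatml", # Qwen1.5 chat models use the ChatML template
)

out = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Give me a short introduction to large language models."},
    ],
    max_tokens=256,
)
print(out["choices"][0]["message"]["content"])
```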
MonoHime/rubert-base-cased-sentiment-new
MonoHime
"2023-03-17T09:03:04Z"
92,914
18
transformers
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "sentiment", "ru", "dataset:Tatyana/ru_sentiment_dataset", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
text-classification
"2022-03-02T23:29:05Z"
---
language:
- ru
tags:
- sentiment
- text-classification
datasets:
- Tatyana/ru_sentiment_dataset
---

# Model Card for RuBERT for Sentiment Analysis

# Model Details

## Model Description

Sentiment classification for Russian texts.

- **Developed by:** Tatyana Voloshina
- **Shared by [Optional]:** Tatyana Voloshina
- **Model type:** Text Classification
- **Language(s) (NLP):** More information needed
- **License:** More information needed
- **Parent Model:** BERT
- **Resources for more information:**
  - [GitHub Repo](https://github.com/T-Sh/Sentiment-Analysis)

# Uses

## Direct Use

This model can be used for the task of text classification.

## Downstream Use [Optional]

More information needed.

## Out-of-Scope Use

The model should not be used to intentionally create hostile or alienating environments for people.

# Bias, Risks, and Limitations

Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by the model may include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups.

## Recommendations

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

# Training Details

## Training Data

The model was trained on [Tatyana/ru_sentiment_dataset](https://huggingface.co/datasets/Tatyana/ru_sentiment_dataset).

## Training Procedure

### Preprocessing

More information needed

### Speeds, Sizes, Times

More information needed

# Evaluation

## Testing Data, Factors & Metrics

### Testing Data

More information needed

### Factors

More information needed

### Metrics

More information needed

## Results

More information needed

# Model Examination

## Labels meaning

0: NEUTRAL
1: POSITIVE
2: NEGATIVE

# Environmental Impact

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** More information needed
- **Hours used:** More information needed
- **Cloud Provider:** More information needed
- **Compute Region:** More information needed
- **Carbon Emitted:** More information needed

# Technical Specifications [optional]

## Model Architecture and Objective

More information needed

## Compute Infrastructure

More information needed

### Hardware

More information needed

### Software

More information needed.

# Citation

More information needed.

# Glossary [optional]

More information needed

# More Information [optional]

More information needed

# Model Card Authors [optional]

Tatyana Voloshina in collaboration with Ezi Ozoani and the Hugging Face team

# Model Card Contact

More information needed

# How to Get Started with the Model

Use the code below to get started with the model.

<details>
<summary> Click to expand </summary>

The required PyTorch checkpoint is available on [Drive](https://drive.google.com/drive/folders/1EnJBq0dGfpjPxbVjybqaS7PsMaPHLUIl?usp=sharing). Download it and place `model.pth.tar` in the folder next to the other model files.
```python
# Install dependencies (notebook-style commands)
!pip install tensorflow-gpu
!pip install deeppavlov
!python -m deeppavlov install squad_bert
!pip install fasttext
!pip install transformers
!python -m deeppavlov install bert_sentence_embedder

from deeppavlov import build_model

# Point build_model at the model's JSON config; the path must be a string
model = build_model("path_to_model/rubert_sentiment.json")

model(["Сегодня хорошая погода", "Я счастлив проводить с тобою время", "Мне нравится эта музыкальная композиция"])
```
</details>
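Alternatively, a minimal sketch using the plain `transformers` API (our addition — the repository ships standard BERT sequence-classification weights per its tags, and the label order below follows the "Labels meaning" mapping above):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("MonoHime/rubert-base-cased-sentiment-new")
model = AutoModelForSequenceClassification.from_pretrained("MonoHime/rubert-base-cased-sentiment-new")

labels = ["NEUTRAL", "POSITIVE", "NEGATIVE"]  # mapping taken from the card above

inputs = tokenizer("Сегодня хорошая погода", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(labels[logits.argmax(dim=-1).item()])
```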
jonatasgrosman/wav2vec2-large-xlsr-53-dutch
jonatasgrosman
"2022-12-14T01:58:20Z"
92,910
8
transformers
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "hf-asr-leaderboard", "mozilla-foundation/common_voice_6_0", "nl", "robust-speech-event", "speech", "xlsr-fine-tuning-week", "dataset:common_voice", "dataset:mozilla-foundation/common_voice_6_0", "doi:10.57967/hf/0203", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
automatic-speech-recognition
"2022-03-02T23:29:05Z"
--- language: nl license: apache-2.0 datasets: - common_voice - mozilla-foundation/common_voice_6_0 metrics: - wer - cer tags: - audio - automatic-speech-recognition - hf-asr-leaderboard - mozilla-foundation/common_voice_6_0 - nl - robust-speech-event - speech - xlsr-fine-tuning-week model-index: - name: XLSR Wav2Vec2 Dutch by Jonatas Grosman results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice nl type: common_voice args: nl metrics: - name: Test WER type: wer value: 15.72 - name: Test CER type: cer value: 5.35 - name: Test WER (+LM) type: wer value: 12.84 - name: Test CER (+LM) type: cer value: 4.64 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: nl metrics: - name: Dev WER type: wer value: 35.79 - name: Dev CER type: cer value: 17.67 - name: Dev WER (+LM) type: wer value: 31.54 - name: Dev CER (+LM) type: cer value: 16.37 --- # Fine-tuned XLSR-53 large model for speech recognition in Dutch Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Dutch using the train and validation splits of [Common Voice 6.1](https://huggingface.co/datasets/common_voice) and [CSS10](https://github.com/Kyubyong/css10). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned thanks to the GPU credits generously given by the [OVHcloud](https://www.ovhcloud.com/en/public-cloud/ai-training/) :) The script used for training can be found here: https://github.com/jonatasgrosman/wav2vec2-sprint ## Usage The model can be used directly (without a language model) as follows... Using the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) library: ```python from huggingsound import SpeechRecognitionModel model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-large-xlsr-53-dutch") audio_paths = ["/path/to/file.mp3", "/path/to/another_file.wav"] transcriptions = model.transcribe(audio_paths) ``` Writing your own inference script: ```python import torch import librosa from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor LANG_ID = "nl" MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-dutch" SAMPLES = 10 test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]") processor = Wav2Vec2Processor.from_pretrained(MODEL_ID) model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000) batch["speech"] = speech_array batch["sentence"] = batch["sentence"].upper() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_sentences = processor.batch_decode(predicted_ids) for i, predicted_sentence in enumerate(predicted_sentences): print("-" * 100) print("Reference:", test_dataset[i]["sentence"]) print("Prediction:", predicted_sentence) ``` | Reference | Prediction | | ------------- | ------------- | | DE ABORIGINALS ZIJN DE OORSPRONKELIJKE BEWONERS VAN AUSTRALIË. 
| DE ABBORIGENALS ZIJN DE OORSPRONKELIJKE BEWONERS VAN AUSTRALIË | | MIJN TOETSENBORD ZIT VOL STOF. | MIJN TOETSENBORD ZIT VOL STOF | | ZE HAD DE BANK BESCHADIGD MET HAAR SKATEBOARD. | ZE HAD DE BANK BESCHADIGD MET HAAR SCHEETBOORD | | WAAR LAAT JIJ JE ONDERHOUD DOEN? | WAAR LAAT JIJ HET ONDERHOUD DOEN | | NA HET LEZEN VAN VELE BEOORDELINGEN HAD ZE EINDELIJK HAAR OOG LATEN VALLEN OP EEN LAPTOP MET EEN QWERTY TOETSENBORD. | NA HET LEZEN VAN VELE BEOORDELINGEN HAD ZE EINDELIJK HAAR OOG LATEN VALLEN OP EEN LAPTOP MET EEN QUERTITOETSEMBORD | | DE TAMPONS ZIJN OP. | DE TAPONT ZIJN OP | | MARIJKE KENT OLIVIER NU AL MEER DAN TWEE JAAR. | MAARRIJKEN KENT OLIEVIER NU AL MEER DAN TWEE JAAR | | HET VOEREN VAN BROOD AAN EENDEN IS EIGENLIJK ONGEZOND VOOR DE BEESTEN. | HET VOEREN VAN BEUROT AAN EINDEN IS EIGENLIJK ONGEZOND VOOR DE BEESTEN | | PARKET MOET JE STOFZUIGEN, TEGELS MOET JE DWEILEN. | PARKET MOET JE STOF ZUIGEN MAAR TEGELS MOET JE DWEILEN | | IN ONZE BUURT KENT IEDEREEN ELKAAR. | IN ONZE BUURT KENT IEDEREEN ELKAAR | ## Evaluation 1. To evaluate on `mozilla-foundation/common_voice_6_0` with split `test` ```bash python eval.py --model_id jonatasgrosman/wav2vec2-large-xlsr-53-dutch --dataset mozilla-foundation/common_voice_6_0 --config nl --split test ``` 2. To evaluate on `speech-recognition-community-v2/dev_data` ```bash python eval.py --model_id jonatasgrosman/wav2vec2-large-xlsr-53-dutch --dataset speech-recognition-community-v2/dev_data --config nl --split validation --chunk_length_s 5.0 --stride_length_s 1.0 ``` ## Citation If you want to cite this model you can use this: ```bibtex @misc{grosman2021xlsr53-large-dutch, title={Fine-tuned {XLSR}-53 large model for speech recognition in {D}utch}, author={Grosman, Jonatas}, howpublished={\url{https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-dutch}}, year={2021} } ```
SAPOSS/password-model
SAPOSS
"2022-11-09T10:12:15Z"
92,899
9
transformers
[ "transformers", "tf", "roberta", "text-classification", "en", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-03-02T23:29:04Z"
---
language:
- en
---

# Model Card for Password-Model

# Model Details

## Model Description

The Password Model is intended to be used with [Credential Digger](https://github.com/SAP/credential-digger) in order to automatically filter false positive password discoveries.

- **Developed by:** SAP OSS
- **Shared by [Optional]:** Hugging Face
- **Model type:** Text Classification
- **Language(s) (NLP):** en
- **License:** Apache-2.0
- **Related Models:**
  - **Parent Model:** RoBERTa
- **Resources for more information:**
  - [GitHub Repo](https://github.com/SAP/credential-digger)
  - [Associated Paper](https://www.scitepress.org/Papers/2021/102381/102381.pdf)

# Uses

## Direct Use

The model is directly integrated into [Credential Digger](https://github.com/SAP/credential-digger) and can be used to filter the false positive password discoveries of a scan.

## Out-of-Scope Use

The model should not be used to intentionally create hostile or alienating environments for people.

# Training Details

## Training Data

[CodeBERT-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm) fine-tuned on a dataset for leak detection.

## Training Procedure

### Preprocessing

More information needed

### Speeds, Sizes, Times

More information needed

# Evaluation

More information needed

## Testing Data, Factors & Metrics

### Testing Data

More information needed

### Factors

More information needed

### Metrics

More information needed

## Results

More information needed

# Model Examination

More information needed

# Environmental Impact

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** More information needed
- **Hours used:** More information needed
- **Cloud Provider:** More information needed
- **Compute Region:** More information needed
- **Carbon Emitted:** More information needed

# Technical Specifications [optional]

## Model Architecture and Objective

More information needed

## Compute Infrastructure

More information needed

### Hardware

More information needed

### Software

More information needed

# Citation

**BibTeX:**

```
TBD
```

# Model Card Authors [optional]

SAP OSS in collaboration with Ezi Ozoani and the Hugging Face team.

# Model Card Contact

More information needed

# How to Get Started with the Model

The model is directly integrated into Credential Digger and can be used to filter the false positive discoveries of a scan.

<details>
<summary> Click to expand </summary>

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("SAPOSS/password-model")

model = AutoModelForSequenceClassification.from_pretrained("SAPOSS/password-model")
```
</details>
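A short inference sketch extending the snippet above (our addition, not from the card): per the repository tags this is a TensorFlow checkpoint, so we load it with the TF classes; which label index marks a real leak versus a false positive is an assumption — check `model.config.id2label`.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("SAPOSS/password-model")
model = TFAutoModelForSequenceClassification.from_pretrained("SAPOSS/password-model")

# Score a candidate discovery, e.g. a code line flagged by a scanner.
candidate = 'password = "s3cr3t_t0ken"'
inputs = tokenizer(candidate, return_tensors="tf", truncation=True)
probs = tf.nn.softmax(model(**inputs).logits, axis=-1).numpy()[0]

# Print class probabilities keyed by the model's own label names.
print(dict(zip(model.config.id2label.values(), probs)))
```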
timm/efficientnet_b0.ra_in1k
timm
"2023-04-27T21:09:50Z"
92,882
3
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2110.00476", "arxiv:1905.11946", "license:apache-2.0", "region:us" ]
image-classification
"2022-12-12T23:52:52Z"
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for efficientnet_b0.ra_in1k

An EfficientNet image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below.

Recipe details:
* RandAugment `RA` recipe. Inspired by and evolved from EfficientNet RandAugment recipes. Published as `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476).
* RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging
* Step (exponential decay w/ staircase) LR schedule with warmup

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 5.3
  - GMACs: 0.4
  - Activations (M): 6.7
  - Image size: 224 x 224
- **Papers:**
  - EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks: https://arxiv.org/abs/1905.11946
  - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('efficientnet_b0.ra_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'efficientnet_b0.ra_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 320, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'efficientnet_b0.ra_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 1280, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation
```bibtex
@inproceedings{tan2019efficientnet,
  title={Efficientnet: Rethinking model scaling for convolutional neural networks},
  author={Tan, Mingxing and Le, Quoc},
  booktitle={International conference on machine learning},
  pages={6105--6114},
  year={2019},
  organization={PMLR}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
```bibtex
@inproceedings{wightman2021resnet,
  title={ResNet strikes back: An improved training procedure in timm},
  author={Wightman, Ross and Touvron, Hugo and Jegou, Herve},
  booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future}
}
```