Dataset columns:

| Column | Type | Range / values |
| --- | --- | --- |
| modelId | string | 4-81 chars |
| tags | sequence | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0-59.7M |
| first_commit | unknown | |
| card | string | 51-438k chars |
AnonymousSub/declutr-model-emanuals
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 --- # Graphcore/gpt2-medium-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools that enable maximum efficiency when training and running models on Graphcore’s IPUs - a completely new kind of massively parallel processor built to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through Hugging Face Optimum, Graphcore has released ready-to-use IPU-trained model checkpoints and IPU configuration files that make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug in any public dataset, and its seamless integration with our state-of-the-art hardware gives you a quicker time-to-value for your AI project. ## Model description GPT2 is a large transformer-based language model built from transformer decoder blocks (BERT, by contrast, uses transformer encoder blocks). It moves layer normalisation to the input of each sub-block, similar to a pre-activation residual network, and adds an additional layer normalisation after the final block. Paper link: [Language Models are Unsupervised Multitask Learners](https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the [HuggingFace/gpt2-medium](https://huggingface.co/gpt2-medium) model on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ```python from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/gpt2-medium-ipu") ```
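The card stops at loading the configuration. As a minimal sketch of the typical next step - assuming optimum-graphcore's `IPUTrainer`/`IPUTrainingArguments` API - the following shows how the `IPUConfig` would be combined with the matching `gpt2-medium` checkpoint; the two-sentence dataset, output directory and hyperparameters are illustrative placeholders, not Graphcore's recommended settings.

```python
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from optimum.graphcore import IPUConfig, IPUTrainer, IPUTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("gpt2-medium")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token
model = AutoModelForCausalLM.from_pretrained("gpt2-medium")

# Tiny illustrative corpus; a real run would tokenize a full dataset.
enc = tokenizer(
    ["IPUs are massively parallel processors.",
     "Optimum Graphcore extends Transformers."],
    truncation=True, padding="max_length", max_length=32,
)
enc["labels"] = [ids.copy() for ids in enc["input_ids"]]  # causal-LM labels
train_ds = Dataset.from_dict(enc)

ipu_config = IPUConfig.from_pretrained("Graphcore/gpt2-medium-ipu")
args = IPUTrainingArguments(
    output_dir="gpt2-medium-ipu-out",  # placeholder path
    per_device_train_batch_size=1,
    num_train_epochs=1,
)

trainer = IPUTrainer(
    model=model,
    ipu_config=ipu_config,  # the IPU execution plan loaded above
    args=args,
    train_dataset=train_ds,
)
trainer.train()
```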
AnonymousSub/declutr-model
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 --- # Graphcore/gpt2-small-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools that enable maximum efficiency when training and running models on Graphcore’s IPUs - a completely new kind of massively parallel processor built to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through Hugging Face Optimum, Graphcore has released ready-to-use IPU-trained model checkpoints and IPU configuration files that make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug in any public dataset, and its seamless integration with our state-of-the-art hardware gives you a quicker time-to-value for your AI project. ## Model description GPT2 is a large transformer-based language model built from transformer decoder blocks (BERT, by contrast, uses transformer encoder blocks). It moves layer normalisation to the input of each sub-block, similar to a pre-activation residual network, and adds an additional layer normalisation after the final block. Paper link: [Language Models are Unsupervised Multitask Learners](https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the [GPT2 Small](https://huggingface.co/gpt2) model on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ```python from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/gpt2-small-ipu") ```
AnonymousSub/declutr-model_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 --- # Graphcore/roberta-base-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools that enable maximum efficiency when training and running models on Graphcore’s IPUs - a completely new kind of massively parallel processor built to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through Hugging Face Optimum, Graphcore has released ready-to-use IPU-trained model checkpoints and IPU configuration files that make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug in any public dataset, and its seamless integration with our state-of-the-art hardware gives you a quicker time-to-value for your AI project. ## Model description RoBERTa is based on the BERT pretraining approach and improves on it by carefully evaluating a number of BERT's pretraining design decisions, which were found to leave the model undertrained. It improves performance by training the model longer, with bigger batches over more data, removing the next-sentence-prediction objective, training on longer sequences, and dynamically changing the masking pattern applied to the training data. As a result, it achieved state-of-the-art results on GLUE, RACE and SQuAD. Paper link: [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/pdf/1907.11692.pdf) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the [roberta-base](https://huggingface.co/roberta-base) model on Graphcore IPUs. ## Usage ```python from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/roberta-base-ipu") ```
AnonymousSub/declutr-model_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
# Graphcore/roberta-large-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools that enable maximum efficiency when training and running models on Graphcore’s IPUs - a completely new kind of massively parallel processor built to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through Hugging Face Optimum, Graphcore has released ready-to-use IPU-trained model checkpoints and IPU configuration files that make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug in any public dataset, and its seamless integration with our state-of-the-art hardware gives you a quicker time-to-value for your AI project. ## Model description RoBERTa is based on the BERT pretraining approach and improves on it by carefully evaluating a number of BERT's pretraining design decisions, which were found to leave the model undertrained. It improves performance by training the model longer, with bigger batches over more data, removing the next-sentence-prediction objective, training on longer sequences, and dynamically changing the masking pattern applied to the training data. As a result, it achieved state-of-the-art results on GLUE, RACE and SQuAD. Paper link: [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/pdf/1907.11692.pdf) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the [roberta-large](https://huggingface.co/roberta-large) model on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ```python from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/roberta-large-ipu") ```
AnonymousSub/declutr-roberta-papers
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 --- # Graphcore/t5-small-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools that enable maximum efficiency when training and running models on Graphcore’s IPUs - a completely new kind of massively parallel processor built to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through Hugging Face Optimum, Graphcore has released ready-to-use IPU-trained model checkpoints and IPU configuration files that make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug in any public dataset, and its seamless integration with our state-of-the-art hardware gives you a quicker time-to-value for your AI project. ## Model description The Text-to-Text Transfer Transformer (T5) is a Transformer-based model that uses a text-to-text approach for translation, question answering and classification. It introduces a unified framework that converts all text-based language problems into a text-to-text format for transfer learning in NLP, which allows the same model, loss function, hyperparameters, etc. to be used across a diverse set of tasks. Paper link: [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the T5 Small model (e.g. [HuggingFace/t5-small](https://huggingface.co/t5-small)) on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ```python from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/t5-small-ipu") ```
AnonymousSub/declutr-s10-AR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
# Graphcore/vit-base-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools that enable maximum efficiency when training and running models on Graphcore’s IPUs - a completely new kind of massively parallel processor built to accelerate machine intelligence. Learn more about how to train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through Hugging Face Optimum, Graphcore has released ready-to-use IPU-trained model checkpoints and IPU configuration files that make it easy to train models with maximum efficiency on the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug in any public dataset, and its seamless integration with our state-of-the-art hardware gives you a quicker time-to-value for your AI project. ## Model description The Vision Transformer (ViT) is a model for image recognition that applies a Transformer architecture - of the kind widely used for NLP pretraining - to patches of an image. It uses a standard Transformer encoder as used in NLP, and this simple yet scalable strategy works surprisingly well when coupled with pre-training on large amounts of data and transferred to mid-sized or small image recognition benchmarks, while requiring substantially fewer computational resources to train. Paper link: [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/pdf/2010.11929.pdf) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the ViT base model (e.g. [vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) or [deit-base-patch16-384](https://huggingface.co/facebook/deit-base-patch16-384)) on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ```python from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/vit-base-ipu") ```
AnonymousSub/dummy_2
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
39
null
--- tags: - adapter-transformers - adapterhub:quality_estimation/wmt21 - bert --- # Adapter `Gregor/bert-base-multilingual-cased-wmt21-qe` for bert-base-multilingual-cased An [adapter](https://adapterhub.ml) for the bert-base-multilingual-cased model that was trained on the [quality_estimation/wmt21](https://adapterhub.ml/explore/quality_estimation/wmt21/) dataset and includes a prediction head for classification. This adapter was created for use with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library. ## Usage First, install `adapter-transformers`: ```bash pip install -U adapter-transformers ``` _Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_ Now, the adapter can be loaded and activated like this: ```python from transformers import AutoModelWithHeads model = AutoModelWithHeads.from_pretrained("bert-base-multilingual-cased") adapter_name = model.load_adapter("Gregor/bert-base-multilingual-cased-wmt21-qe") model.active_adapters = adapter_name ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
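As a hedged follow-up to the loading snippet in this card, the sketch below runs the activated adapter on a (source, translation) pair; the example sentences are ours, and the exact output format and label meaning depend on the classification head shipped with the adapter.

```python
import torch
from transformers import AutoTokenizer, AutoModelWithHeads

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
model = AutoModelWithHeads.from_pretrained("bert-base-multilingual-cased")
adapter_name = model.load_adapter("Gregor/bert-base-multilingual-cased-wmt21-qe")
model.active_adapters = adapter_name

# Quality estimation scores a source sentence against its translation.
inputs = tokenizer("This is the source sentence.",
                   "Dies ist der übersetzte Satz.",
                   return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.argmax(dim=-1).item())  # predicted class index
```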
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - conversational --- # The Owl House DialoGPT Model
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: fy tags: - BERTje --- Wietse de Vries • Martijn Bartelds • Malvina Nissim • Martijn Wieling # Adapting Monolingual Models: Data can be Scarce when Language Similarity is High This model is part of this paper + code: - 📝 [Paper](https://arxiv.org/abs/2105.02855) - 💻 [Code](https://github.com/wietsedv/low-resource-adapt) ## Models The best fine-tuned models for Gronings and West Frisian are available on the HuggingFace model hub: ### Lexical layers These models are identical to [BERTje](https://github.com/wietsedv/bertje), but with different lexical layers (`bert.embeddings.word_embeddings`). - 🤗 [`GroNLP/bert-base-dutch-cased`](https://huggingface.co/GroNLP/bert-base-dutch-cased) (Dutch; source language) - 🤗 [`GroNLP/bert-base-dutch-cased-gronings`](https://huggingface.co/GroNLP/bert-base-dutch-cased-gronings) (Gronings) - 🤗 [`GroNLP/bert-base-dutch-cased-frisian`](https://huggingface.co/GroNLP/bert-base-dutch-cased-frisian) (West Frisian) ### POS tagging These models share the same fine-tuned Transformer layers + classification head, but with the retrained lexical layers from the models above. - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino) (Dutch) - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino-gronings`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino-gronings) (Gronings) - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino-frisian`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino-frisian) (West Frisian)
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: gos tags: - BERTje - pos --- Wietse de Vries • Martijn Bartelds • Malvina Nissim • Martijn Wieling # Adapting Monolingual Models: Data can be Scarce when Language Similarity is High This model is part of this paper + code: - 📝 [Paper](https://arxiv.org/abs/2105.02855) - 💻 [Code](https://github.com/wietsedv/low-resource-adapt) ## Models The best fine-tuned models for Gronings and West Frisian are available on the HuggingFace model hub: ### Lexical layers These models are identical to [BERTje](https://github.com/wietsedv/bertje), but with different lexical layers (`bert.embeddings.word_embeddings`). - 🤗 [`GroNLP/bert-base-dutch-cased`](https://huggingface.co/GroNLP/bert-base-dutch-cased) (Dutch; source language) - 🤗 [`GroNLP/bert-base-dutch-cased-gronings`](https://huggingface.co/GroNLP/bert-base-dutch-cased-gronings) (Gronings) - 🤗 [`GroNLP/bert-base-dutch-cased-frisian`](https://huggingface.co/GroNLP/bert-base-dutch-cased-frisian) (West Frisian) ### POS tagging These models share the same fine-tuned Transformer layers + classification head, but with the retrained lexical layers from the models above. - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino) (Dutch) - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino-gronings`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino-gronings) (Gronings) - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino-frisian`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino-frisian) (West Frisian)
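For readers who want to try the POS-tagging models listed in this card, a brief illustrative sketch with the standard `token-classification` pipeline follows; the example sentence is our own.

```python
from transformers import pipeline

# UPOS tagger for Gronings; the input sentence is illustrative.
tagger = pipeline(
    "token-classification",
    model="GroNLP/bert-base-dutch-cased-upos-alpino-gronings",
)
for token in tagger("Dit is n zin."):
    print(token["word"], token["entity"])  # token text and predicted UPOS tag
```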
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- language: nl tags: - BERTje - pos --- Wietse de Vries • Martijn Bartelds • Malvina Nissim • Martijn Wieling # Adapting Monolingual Models: Data can be Scarce when Language Similarity is High This model is part of this paper + code: - 📝 [Paper](https://arxiv.org/abs/2105.02855) - 💻 [Code](https://github.com/wietsedv/low-resource-adapt) ## Models The best fine-tuned models for Gronings and West Frisian are available on the HuggingFace model hub: ### Lexical layers These models are identical to [BERTje](https://github.com/wietsedv/bertje), but with different lexical layers (`bert.embeddings.word_embeddings`). - 🤗 [`GroNLP/bert-base-dutch-cased`](https://huggingface.co/GroNLP/bert-base-dutch-cased) (Dutch; source language) - 🤗 [`GroNLP/bert-base-dutch-cased-gronings`](https://huggingface.co/GroNLP/bert-base-dutch-cased-gronings) (Gronings) - 🤗 [`GroNLP/bert-base-dutch-cased-frisian`](https://huggingface.co/GroNLP/bert-base-dutch-cased-frisian) (West Frisian) ### POS tagging These models share the same fine-tuned Transformer layers + classification head, but with the retrained lexical layers from the models above. - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino) (Dutch) - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino-gronings`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino-gronings) (Gronings) - 🤗 [`GroNLP/bert-base-dutch-cased-upos-alpino-frisian`](https://huggingface.co/GroNLP/bert-base-dutch-cased-upos-alpino-frisian) (West Frisian)
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: nl thumbnail: "https://raw.githubusercontent.com/wietsedv/bertje/master/bertje.png" tags: - BERTje --- # BERTje: A Dutch BERT model [Wietse de Vries](https://www.semanticscholar.org/author/Wietse-de-Vries/144611157) • [Andreas van Cranenburgh](https://www.semanticscholar.org/author/Andreas-van-Cranenburgh/2791585) • [Arianna Bisazza](https://www.semanticscholar.org/author/Arianna-Bisazza/3242253) • [Tommaso Caselli](https://www.semanticscholar.org/author/Tommaso-Caselli/1864635) • [Gertjan van Noord](https://www.semanticscholar.org/author/Gertjan-van-Noord/143715131) • [Malvina Nissim](https://www.semanticscholar.org/author/M.-Nissim/2742475) ## Model description BERTje is a Dutch pre-trained BERT model developed at the University of Groningen. <img src="https://raw.githubusercontent.com/wietsedv/bertje/master/bertje.png" height="250"> For details, check out our paper on [arXiv](https://arxiv.org/abs/1912.09582), the code on [Github](https://github.com/wietsedv/bertje) and related work on [Semantic Scholar](https://www.semanticscholar.org/paper/BERTje%3A-A-Dutch-BERT-Model-Vries-Cranenburgh/a4d5e425cac0bf84c86c0c9f720b6339d6288ffa). The paper and Github page mention fine-tuned models that are available [here](https://huggingface.co/wietsedv). ## How to use ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel tokenizer = AutoTokenizer.from_pretrained("GroNLP/bert-base-dutch-cased") model = AutoModel.from_pretrained("GroNLP/bert-base-dutch-cased") # PyTorch model = TFAutoModel.from_pretrained("GroNLP/bert-base-dutch-cased") # Tensorflow ``` **WARNING:** The vocabulary size of BERTje has changed in 2021. If you use an older fine-tuned model and experience problems with the `GroNLP/bert-base-dutch-cased` tokenizer, use the following tokenizer: ```python tokenizer = AutoTokenizer.from_pretrained("GroNLP/bert-base-dutch-cased", revision="v1") # v1 is the old vocabulary ``` ## Benchmarks The arXiv paper lists benchmarks. Here are a couple of comparisons between BERTje, multilingual BERT, BERT-NL and RobBERT that were done after writing the paper. Unlike some other comparisons, the fine-tuning procedures for these benchmarks are identical for each pre-trained model. You may be able to achieve higher scores for individual models by optimizing fine-tuning procedures. More experimental results will be added to this page when they are finished. Technical details about how we fine-tuned these models will be published later, as will downloadable fine-tuned checkpoints. All of the tested models are *base* sized (12 layers) with cased tokenization. Headers in the tables below link to original data sources. Scores link to the model pages that correspond to that specific fine-tuned model. These tables will be updated when more fine-tuned models are made available. ### Named Entity Recognition | Model | [CoNLL-2002](https://www.clips.uantwerpen.be/conll2002/ner/) | [SoNaR-1](https://ivdnt.org/downloads/taalmaterialen/tstc-sonar-corpus) | spaCy UD LassySmall | | --- | --- | --- | --- | | **BERTje** | [**90.24**](https://huggingface.co/wietsedv/bert-base-dutch-cased-finetuned-conll2002-ner) | [**84.93**](https://huggingface.co/wietsedv/bert-base-dutch-cased-finetuned-sonar-ner) | [86.10](https://huggingface.co/wietsedv/bert-base-dutch-cased-finetuned-udlassy-ner) | | [mBERT](https://github.com/google-research/bert/blob/master/multilingual.md) | [88.61](https://huggingface.co/wietsedv/bert-base-multilingual-cased-finetuned-conll2002-ner) | [84.19](https://huggingface.co/wietsedv/bert-base-multilingual-cased-finetuned-sonar-ner) | [**86.77**](https://huggingface.co/wietsedv/bert-base-multilingual-cased-finetuned-udlassy-ner) | | [BERT-NL](http://textdata.nl) | 85.05 | 80.45 | 81.62 | | [RobBERT](https://github.com/iPieter/RobBERT) | 84.72 | 81.98 | 79.84 | ### Part-of-speech tagging | Model | [UDv2.5 LassySmall](https://universaldependencies.org/treebanks/nl_lassysmall/index.html) | | --- | --- | | **BERTje** | **96.48** | | [mBERT](https://github.com/google-research/bert/blob/master/multilingual.md) | 96.20 | | [BERT-NL](http://textdata.nl) | 96.10 | | [RobBERT](https://github.com/iPieter/RobBERT) | 95.91 | ### BibTeX entry and citation info ```bibtex @misc{devries2019bertje, title = {{BERTje}: {A} {Dutch} {BERT} {Model}}, shorttitle = {{BERTje}}, author = {de Vries, Wietse and van Cranenburgh, Andreas and Bisazza, Arianna and Caselli, Tommaso and Noord, Gertjan van and Nissim, Malvina}, year = {2019}, month = dec, howpublished = {arXiv:1912.09582}, url = {http://arxiv.org/abs/1912.09582}, } ```
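As BERTje is a masked language model, a quick smoke test is a `fill-mask` query; this snippet is our own illustration rather than part of the original card.

```python
from transformers import pipeline

# Top predictions for the masked token; the example sentence is ours.
unmasker = pipeline("fill-mask", model="GroNLP/bert-base-dutch-cased")
for pred in unmasker("Ik woon in [MASK]."):
    print(pred["token_str"], round(pred["score"], 3))
```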
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: nl tags: - adaption - recycled - gpt2-small pipeline_tag: text-generation --- # GPT-2 recycled for Dutch (small) [Wietse de Vries](https://www.semanticscholar.org/author/Wietse-de-Vries/144611157) • [Malvina Nissim](https://www.semanticscholar.org/author/M.-Nissim/2742475) ## Model description This model is based on the small OpenAI GPT-2 ([`gpt2`](https://huggingface.co/gpt2)) model. For details, check out our paper on [arXiv](https://arxiv.org/abs/2012.05628) and the code on [Github](https://github.com/wietsedv/gpt2-recycle). ## Related models ### Dutch - [`gpt2-small-dutch-embeddings`](https://huggingface.co/GroNLP/gpt2-small-dutch-embeddings): Small model size with only retrained lexical embeddings. - [`gpt2-small-dutch`](https://huggingface.co/GroNLP/gpt2-small-dutch): Small model size with retrained lexical embeddings and additional fine-tuning of the full model. (**Recommended**) - [`gpt2-medium-dutch-embeddings`](https://huggingface.co/GroNLP/gpt2-medium-dutch-embeddings): Medium model size with only retrained lexical embeddings. ### Italian - [`gpt2-small-italian-embeddings`](https://huggingface.co/GroNLP/gpt2-small-italian-embeddings): Small model size with only retrained lexical embeddings. - [`gpt2-small-italian`](https://huggingface.co/GroNLP/gpt2-small-italian): Small model size with retrained lexical embeddings and additional fine-tuning of the full model. (**Recommended**) - [`gpt2-medium-italian-embeddings`](https://huggingface.co/GroNLP/gpt2-medium-italian-embeddings): Medium model size with only retrained lexical embeddings. ## How to use ```python from transformers import pipeline pipe = pipeline("text-generation", model="GroNLP/gpt2-small-dutch") ``` ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel tokenizer = AutoTokenizer.from_pretrained("GroNLP/gpt2-small-dutch") model = AutoModel.from_pretrained("GroNLP/gpt2-small-dutch") # PyTorch model = TFAutoModel.from_pretrained("GroNLP/gpt2-small-dutch") # Tensorflow ``` ## BibTeX entry ```bibtex @misc{devries2020good, title={As good as new. How to successfully recycle English GPT-2 to make models for other languages}, author={Wietse de Vries and Malvina Nissim}, year={2020}, eprint={2012.05628}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
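To go from the pipeline construction shown in the card to actual text, a generation call looks like the following; the Dutch prompt and sampling settings are our own illustrative choices.

```python
from transformers import pipeline

pipe = pipeline("text-generation", model="GroNLP/gpt2-small-dutch")
out = pipe("Gisteren liep ik door Groningen en",
           max_new_tokens=30, do_sample=True, top_p=0.95)
print(out[0]["generated_text"])
```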
AnonymousSub/rule_based_hier_quadruplet_0.1_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: it tags: - adaption - recycled - gpt2-small pipeline_tag: text-generation --- # GPT-2 recycled for Italian (small, adapted lexical embeddings) [Wietse de Vries](https://www.semanticscholar.org/author/Wietse-de-Vries/144611157) • [Malvina Nissim](https://www.semanticscholar.org/author/M.-Nissim/2742475) ## Model description This model is based on the small OpenAI GPT-2 ([`gpt2`](https://huggingface.co/gpt2)) model. The Transformer layer weights in this model are identical to the original English model, but the lexical layer has been retrained for an Italian vocabulary. For details, check out our paper on [arXiv](https://arxiv.org/abs/2012.05628) and the code on [Github](https://github.com/wietsedv/gpt2-recycle). ## Related models ### Dutch - [`gpt2-small-dutch-embeddings`](https://huggingface.co/GroNLP/gpt2-small-dutch-embeddings): Small model size with only retrained lexical embeddings. - [`gpt2-small-dutch`](https://huggingface.co/GroNLP/gpt2-small-dutch): Small model size with retrained lexical embeddings and additional fine-tuning of the full model. (**Recommended**) - [`gpt2-medium-dutch-embeddings`](https://huggingface.co/GroNLP/gpt2-medium-dutch-embeddings): Medium model size with only retrained lexical embeddings. ### Italian - [`gpt2-small-italian-embeddings`](https://huggingface.co/GroNLP/gpt2-small-italian-embeddings): Small model size with only retrained lexical embeddings. - [`gpt2-small-italian`](https://huggingface.co/GroNLP/gpt2-small-italian): Small model size with retrained lexical embeddings and additional fine-tuning of the full model. (**Recommended**) - [`gpt2-medium-italian-embeddings`](https://huggingface.co/GroNLP/gpt2-medium-italian-embeddings): Medium model size with only retrained lexical embeddings. ## How to use ```python from transformers import pipeline pipe = pipeline("text-generation", model="GroNLP/gpt2-small-italian-embeddings") ``` ```python from transformers import AutoTokenizer, AutoModel, TFAutoModel tokenizer = AutoTokenizer.from_pretrained("GroNLP/gpt2-small-italian-embeddings") model = AutoModel.from_pretrained("GroNLP/gpt2-small-italian-embeddings") # PyTorch model = TFAutoModel.from_pretrained("GroNLP/gpt2-small-italian-embeddings") # Tensorflow ``` ## BibTeX entry ```bibtex @misc{devries2020good, title={As good as new. How to successfully recycle English GPT-2 to make models for other languages}, author={Wietse de Vries and Malvina Nissim}, year={2020}, eprint={2012.05628}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - conversational --- # Game of Thrones DialoGPT Model
AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
"2022-01-11T16:30:27Z"
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-cased-finetuned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-cased-finetuned This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9161 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.3101 | 1.0 | 974 | 2.0502 | | 2.0831 | 2.0 | 1948 | 1.9627 | | 2.0198 | 3.0 | 2922 | 1.8998 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.9.1 - Datasets 1.16.1 - Tokenizers 0.10.3
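The hyperparameters listed in this card map directly onto the Hugging Face `TrainingArguments` API; the sketch below reconstructs them for reference (the output directory is a placeholder, and since the dataset is unknown no trainer is built).

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilbert-base-cased-finetuned",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    lr_scheduler_type="linear",   # Adam betas/epsilon above are the defaults
    num_train_epochs=3,
    fp16=True,                    # "mixed_precision_training: Native AMP"
)
```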
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - conversational --- # Harry Potter DialoGPT Model
AnonymousSub/unsup-consert-base_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
"2022-02-05T00:33:22Z"
--- language: - ur license: apache-2.0 tags: - automatic-speech-recognition - mozilla-foundation/common_voice_7_0 - generated_from_trainer datasets: - common_voice model-index: - name: '' results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - UR dataset. It achieves the following results on the evaluation set: - Loss: 1.2924 - Wer: 0.7201 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 200.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:----:|:---------------:|:------:| | 11.2783 | 4.17 | 100 | 4.6409 | 1.0 | | 3.5578 | 8.33 | 200 | 3.1649 | 1.0 | | 3.1279 | 12.5 | 300 | 3.0335 | 1.0 | | 2.9944 | 16.67 | 400 | 2.9526 | 0.9983 | | 2.9275 | 20.83 | 500 | 2.9291 | 1.0009 | | 2.8077 | 25.0 | 600 | 2.5633 | 0.9895 | | 2.4438 | 29.17 | 700 | 1.9045 | 0.9564 | | 1.9659 | 33.33 | 800 | 1.4114 | 0.7960 | | 1.7092 | 37.5 | 900 | 1.2584 | 0.7637 | | 1.517 | 41.67 | 1000 | 1.2040 | 0.7507 | | 1.3966 | 45.83 | 1100 | 1.1273 | 0.7463 | | 1.3197 | 50.0 | 1200 | 1.1054 | 0.6957 | | 1.2476 | 54.17 | 1300 | 1.1035 | 0.7001 | | 1.1796 | 58.33 | 1400 | 1.0890 | 0.7097 | | 1.1237 | 62.5 | 1500 | 1.0883 | 0.7167 | | 1.0777 | 66.67 | 1600 | 1.1067 | 0.7219 | | 1.0051 | 70.83 | 1700 | 1.1115 | 0.7236 | | 0.9521 | 75.0 | 1800 | 1.0867 | 0.7132 | | 0.9147 | 79.17 | 1900 | 1.0852 | 0.7210 | | 0.8798 | 83.33 | 2000 | 1.1411 | 0.7097 | | 0.8317 | 87.5 | 2100 | 1.1634 | 0.7018 | | 0.7946 | 91.67 | 2200 | 1.1621 | 0.7201 | | 0.7594 | 95.83 | 2300 | 1.1482 | 0.7036 | | 0.729 | 100.0 | 2400 | 1.1493 | 0.7062 | | 0.7055 | 104.17 | 2500 | 1.1726 | 0.6931 | | 0.6622 | 108.33 | 2600 | 1.1938 | 0.7001 | | 0.6583 | 112.5 | 2700 | 1.1832 | 0.7149 | | 0.6299 | 116.67 | 2800 | 1.1996 | 0.7175 | | 0.5903 | 120.83 | 2900 | 1.1986 | 0.7132 | | 0.5816 | 125.0 | 3000 | 1.1909 | 0.7010 | | 0.5583 | 129.17 | 3100 | 1.2079 | 0.6870 | | 0.5392 | 133.33 | 3200 | 1.2109 | 0.7228 | | 0.5412 | 137.5 | 3300 | 1.2353 | 0.7245 | | 0.5136 | 141.67 | 3400 | 1.2390 | 0.7254 | | 0.5007 | 145.83 | 3500 | 1.2273 | 0.7123 | | 0.4883 | 150.0 | 3600 | 1.2773 | 0.7289 | | 0.4835 | 154.17 | 3700 | 1.2678 | 0.7289 | | 0.4568 | 158.33 | 3800 | 1.2592 | 0.7350 | | 0.4525 | 162.5 | 3900 | 1.2705 | 0.7254 | | 0.4379 | 166.67 | 4000 | 1.2717 | 0.7306 | | 0.4198 | 170.83 | 4100 | 1.2618 | 0.7219 | | 0.4216 | 175.0 | 4200 | 1.2909 | 0.7158 | | 0.4305 | 179.17 | 4300 | 1.2808 | 0.7167 | | 0.399 | 183.33 | 4400 | 1.2750 | 0.7193 | | 0.3937 | 187.5 | 4500 | 1.2719 | 0.7149 | | 0.3905 | 191.67 | 4600 | 1.2816 | 0.7158 | | 0.3892 | 195.83 | 4700 | 1.2951 | 0.7210 | | 0.3932 | 200.0 | 4800 | 1.2924 | 0.7201 | ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.3 - Tokenizers 0.11.0
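This card does not name a checkpoint id (the `name` field is empty), so the inference sketch below uses a hypothetical path; it only illustrates the standard way a fine-tuned wav2vec2 CTC model is queried through the ASR pipeline on 16 kHz audio.

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="path/to/finetuned-wav2vec2-ur",  # hypothetical checkpoint path
)
print(asr("sample_ur.wav")["text"])  # transcription of a 16 kHz mono file
```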
AnonymousSub/unsup-consert-emanuals
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - ur license: apache-2.0 tags: - automatic-speech-recognition - mozilla-foundation/common_voice_8_0 - generated_from_trainer datasets: - common_voice model-index: - name: '' results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [DrishtiSharma/wav2vec2-large-xls-r-300m-hi-d3](https://huggingface.co/DrishtiSharma/wav2vec2-large-xls-r-300m-hi-d3) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - UR dataset. It achieves the following results on the evaluation set: - Loss: 1.5443 - Wer: 0.7030 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.000388 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 750 - num_epochs: 100.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 10.7052 | 1.96 | 100 | 3.4683 | 1.0 | | 3.2395 | 3.92 | 200 | 3.1489 | 1.0 | | 2.9951 | 5.88 | 300 | 2.9823 | 1.0007 | | 2.3574 | 7.84 | 400 | 1.2614 | 0.7598 | | 1.7287 | 9.8 | 500 | 1.1817 | 0.7421 | | 1.6144 | 11.76 | 600 | 1.1315 | 0.7321 | | 1.5598 | 13.73 | 700 | 1.2322 | 0.7550 | | 1.5418 | 15.69 | 800 | 1.2721 | 0.7819 | | 1.4578 | 17.65 | 900 | 1.1710 | 0.7531 | | 1.4311 | 19.61 | 1000 | 1.2042 | 0.7491 | | 1.3483 | 21.57 | 1100 | 1.1702 | 0.7465 | | 1.3078 | 23.53 | 1200 | 1.1963 | 0.7421 | | 1.2576 | 25.49 | 1300 | 1.1501 | 0.7280 | | 1.2173 | 27.45 | 1400 | 1.2526 | 0.7299 | | 1.2217 | 29.41 | 1500 | 1.2479 | 0.7310 | | 1.1536 | 31.37 | 1600 | 1.2567 | 0.7432 | | 1.0939 | 33.33 | 1700 | 1.2801 | 0.7247 | | 1.0745 | 35.29 | 1800 | 1.2340 | 0.7151 | | 1.0454 | 37.25 | 1900 | 1.2372 | 0.7151 | | 1.0101 | 39.22 | 2000 | 1.2461 | 0.7376 | | 0.9833 | 41.18 | 2100 | 1.2553 | 0.7269 | | 0.9314 | 43.14 | 2200 | 1.2372 | 0.7015 | | 0.9147 | 45.1 | 2300 | 1.3035 | 0.7358 | | 0.8758 | 47.06 | 2400 | 1.2598 | 0.7092 | | 0.8356 | 49.02 | 2500 | 1.2557 | 0.7144 | | 0.8105 | 50.98 | 2600 | 1.2619 | 0.7236 | | 0.7947 | 52.94 | 2700 | 1.3994 | 0.7491 | | 0.7623 | 54.9 | 2800 | 1.2932 | 0.7133 | | 0.7282 | 56.86 | 2900 | 1.2799 | 0.7089 | | 0.7108 | 58.82 | 3000 | 1.3615 | 0.7148 | | 0.6896 | 60.78 | 3100 | 1.3129 | 0.7041 | | 0.6496 | 62.75 | 3200 | 1.4050 | 0.6934 | | 0.6075 | 64.71 | 3300 | 1.3571 | 0.7026 | | 0.6242 | 66.67 | 3400 | 1.3369 | 0.7063 | | 0.5865 | 68.63 | 3500 | 1.4368 | 0.7140 | | 0.5721 | 70.59 | 3600 | 1.4224 | 0.7066 | | 0.5475 | 72.55 | 3700 | 1.4798 | 0.7118 | | 0.5086 | 74.51 | 3800 | 1.5107 | 0.7232 | | 0.4958 | 76.47 | 3900 | 1.4849 | 0.7089 | | 0.5046 | 78.43 | 4000 | 1.4451 | 0.7114 | | 0.4694 | 80.39 | 4100 | 1.4674 | 0.7089 | | 0.4386 | 82.35 | 4200 | 1.5245 | 0.7103 | | 0.4516 | 84.31 | 4300 | 1.5032 | 0.7103 | | 0.4113 | 86.27 | 4400 | 1.5246 | 0.7196 | | 0.3972 | 88.24 | 4500 | 1.5318 | 0.7114 | | 0.4006 | 90.2 | 4600 | 1.5543 | 0.6982 | | 0.4014 | 92.16 | 4700 | 1.5442 | 0.7048 | | 0.3672 | 94.12 | 4800 | 1.5542 | 0.7137 | | 0.3666 | 96.08 | 4900 | 1.5414 | 0.7018 | | 0.3574 | 98.04 | 5000 | 1.5465 | 0.7059 | | 0.3428 | 100.0 | 5100 | 1.5443 | 0.7030 | ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.3 - Tokenizers 0.11.0
AnonymousSub/unsup-consert-papers
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - ur license: apache-2.0 tags: - automatic-speech-recognition - mozilla-foundation/common_voice_8_0 - generated_from_trainer - ur - robust-speech-event - hf-asr-leaderboard datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: '' results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 8.0 type: mozilla-foundation/common_voice_8_0 args: ur metrics: - name: Test WER type: wer value: 62.47 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - UR dataset. It achieves the following results on the evaluation set: - Loss: 0.8888 - Wer: 0.6642 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - num_epochs: 50.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 10.1224 | 1.96 | 100 | 3.5429 | 1.0 | | 3.2411 | 3.92 | 200 | 3.1786 | 1.0 | | 3.1283 | 5.88 | 300 | 3.0571 | 1.0 | | 3.0044 | 7.84 | 400 | 2.9560 | 0.9996 | | 2.9388 | 9.8 | 500 | 2.8977 | 1.0011 | | 2.86 | 11.76 | 600 | 2.6944 | 0.9952 | | 2.5538 | 13.73 | 700 | 2.0967 | 0.9435 | | 2.1214 | 15.69 | 800 | 1.4816 | 0.8428 | | 1.8136 | 17.65 | 900 | 1.2459 | 0.8048 | | 1.6795 | 19.61 | 1000 | 1.1232 | 0.7649 | | 1.5571 | 21.57 | 1100 | 1.0510 | 0.7432 | | 1.4975 | 23.53 | 1200 | 1.0298 | 0.6963 | | 1.4485 | 25.49 | 1300 | 0.9775 | 0.7074 | | 1.3924 | 27.45 | 1400 | 0.9798 | 0.6956 | | 1.3604 | 29.41 | 1500 | 0.9345 | 0.7092 | | 1.3224 | 31.37 | 1600 | 0.9535 | 0.6830 | | 1.2816 | 33.33 | 1700 | 0.9178 | 0.6679 | | 1.2623 | 35.29 | 1800 | 0.9249 | 0.6679 | | 1.2421 | 37.25 | 1900 | 0.9124 | 0.6734 | | 1.2208 | 39.22 | 2000 | 0.8962 | 0.6664 | | 1.2145 | 41.18 | 2100 | 0.8903 | 0.6734 | | 1.1888 | 43.14 | 2200 | 0.8883 | 0.6708 | | 1.1933 | 45.1 | 2300 | 0.8928 | 0.6723 | | 1.1838 | 47.06 | 2400 | 0.8868 | 0.6679 | | 1.1634 | 49.02 | 2500 | 0.8886 | 0.6657 | ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.3 - Tokenizers 0.11.0
ArBert/bert-base-uncased-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
language: hi
tags:
- hf-asr-leaderboard
- hi
- model_for_talk
- pretrained
- robust-speech-event
- speech
license: apache-2.0
---

Hindi model pretrained on 4,200 hours of speech data. [Link](https://arxiv.org/abs/2107.07402)
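The card gives no usage snippet. Assuming this is a wav2vec2-style pretrained checkpoint in the standard Hugging Face layout (suggested by the tags, but not stated outright), a minimal sketch for loading it as an encoder might look like this; the repo ID is a placeholder:

```python
# Hedged sketch: using a pretrained (not fine-tuned) wav2vec2 checkpoint as an
# encoder for downstream fine-tuning. The repo ID is a placeholder, not taken
# from the card, and the wav2vec2 architecture is itself an assumption.
import numpy as np
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

MODEL_ID = "<namespace>/hindi-wav2vec2-4200h"  # placeholder repo ID
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_ID)
model = Wav2Vec2Model.from_pretrained(MODEL_ID)

# One second of dummy 16 kHz audio stands in for a real Hindi clip.
audio = np.random.randn(16_000).astype(np.float32)
inputs = feature_extractor(audio, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    hidden = model(inputs.input_values).last_hidden_state
print(hidden.shape)  # (batch, frames, hidden_size): features for a downstream head
```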
ArJakusz/DialoGPT-small-starky
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2021-05-24T18:43:03Z"
---
language: en
license: apache-2.0
datasets:
- hatexplain
---

## Table of Contents
- [Model Details](#model-details)
- [How to Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Technical Specifications](#technical-specifications)
- [Citation Information](#citation-information)

## Model Details

**Model Description:** The model is used for classifying a text as Abusive (Hatespeech and Offensive) or Normal. The model is trained using data from Gab and Twitter, and human rationales were included as part of the training data to boost the performance. The model also has a rationale predictor head that can predict the rationales given an abusive sentence.

- **Developed by:** Binny Mathew, Punyajoy Saha, Seid Muhie Yimam, Chris Biemann, Pawan Goyal, and Animesh Mukherjee
- **Model Type:** Text Classification
- **Language(s):** English
- **License:** Apache-2.0
- **Parent Model:** See the [BERT base uncased model](https://huggingface.co/bert-base-uncased) for more information about the BERT base model.
- **Resources for more information:**
  - [Research Paper](https://arxiv.org/abs/2012.10289) Accepted at AAAI 2021.
  - [GitHub Repo with datasets and models](https://github.com/punyajoy/HateXplain)

## How to Get Started with the Model

**Details of usage:** Please use the **Model_Rational_Label** class inside [models.py](models.py) to load the models. The default prediction in this hosted inference API may be wrong due to the use of different class initialisations.

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
### from models.py
from models import *

tokenizer = AutoTokenizer.from_pretrained("Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two")
model = Model_Rational_Label.from_pretrained("Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two")

inputs = tokenizer("He is a great guy", return_tensors="pt")
prediction_logits, _ = model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
```

## Uses

#### Direct Use
This model can be used for text classification.

#### Downstream Use
[More information needed]

#### Misuse and Out-of-scope Use
The model should not be used to intentionally create hostile or alienating environments for people. In addition, the model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.

## Risks, Limitations and Biases

**CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.**

Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by the model can include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups.
For an example: ![dataset example](https://github.com/hate-alert/HateXplain/blob/master/Figures/dataset_example.png)

The model authors also note in their HateXplain paper that they

> *have not considered any external context such as profile bio, user gender, history of posts etc., which might be helpful in the classification task. Also, in this work we have focused on the English language. It does not consider multilingual hate speech into account.*

#### Training Procedure

##### Preprocessing

The authors detail their preprocessing procedure in the [GitHub repository](https://github.com/hate-alert/HateXplain/tree/master/Preprocess).

## Evaluation

The model authors detail the hidden layer size and attention for the HateXplain fine-tuned models in the [associated paper](https://arxiv.org/pdf/2012.10289.pdf).

#### Results

The model authors, both in their paper and in the Git repository, provide the illustrative output of BERT-HateXplain in comparison to BERT and other HateXplain fine-tuned [models](https://github.com/hate-alert/HateXplain/blob/master/Figures/bias-subgroup.pdf).

## Citation Information

```bibtex
@article{mathew2020hatexplain,
  title={HateXplain: A Benchmark Dataset for Explainable Hate Speech Detection},
  author={Mathew, Binny and Saha, Punyajoy and Yimam, Seid Muhie and Biemann, Chris and Goyal, Pawan and Mukherjee, Animesh},
  journal={arXiv preprint arXiv:2012.10289},
  year={2020}
}
```
Araf/Ummah
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language: en
license: apache-2.0
---

This model is used for detecting **hate speech** in the **English language**. The "mono" in the name refers to the monolingual setting: the model is trained using only English-language data. It is fine-tuned from the multilingual BERT model. The model was trained with several learning rates, and the best validation score achieved is 0.726030, for a learning rate of 2e-5. Training code can be found at https://github.com/punyajoy/DE-LIMIT

### For more details about our paper

Sai Saketh Aluru, Binny Mathew, Punyajoy Saha and Animesh Mukherjee. "[Deep Learning Models for Multilingual Hate Speech Detection](https://arxiv.org/abs/2004.06465)". Accepted at ECML-PKDD 2020.

***Please cite our paper in any published work that uses any of these resources.***

~~~
@article{aluru2020deep,
  title={Deep Learning Models for Multilingual Hate Speech Detection},
  author={Aluru, Sai Saket and Mathew, Binny and Saha, Punyajoy and Mukherjee, Animesh},
  journal={arXiv preprint arXiv:2004.06465},
  year={2020}
}
~~~
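A minimal classification sketch for a checkpoint like this one follows. The repo ID (`Hate-speech-CNERG/dehatebert-mono-english`, inferred from the DE-LIMIT work) and the index-to-label order are assumptions; verify both against the repository's `config.json`.

```python
# Hedged sketch: binary hate-speech classification with a fine-tuned BERT model.
# The repo ID and the label order are assumptions, not taken from the card.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_ID = "Hate-speech-CNERG/dehatebert-mono-english"  # assumed repo ID
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)

inputs = tokenizer("You are a wonderful person", return_tensors="pt")
with torch.no_grad():
    probs = torch.softmax(model(**inputs).logits, dim=-1)

# Assumed mapping: index 0 = non-hateful, index 1 = hateful.
print({label: round(p.item(), 3) for label, p in zip(["non-hate", "hate"], probs[0])})
```

The same pattern carries over to the monolingual French, German, and Italian variants further down; under the same naming assumption, only the language suffix of the repo ID changes.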
AragornII/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language: fr
license: apache-2.0
---

This model is used for detecting **hate speech** in the **French language**. The "mono" in the name refers to the monolingual setting: the model is trained using only French-language data. It is fine-tuned from the multilingual BERT model. The model was trained with several learning rates, and the best validation score achieved is 0.692094, for a learning rate of 3e-5. Training code can be found at this [url](https://github.com/punyajoy/DE-LIMIT)

### For more details about our paper

Sai Saketh Aluru, Binny Mathew, Punyajoy Saha and Animesh Mukherjee. "[Deep Learning Models for Multilingual Hate Speech Detection](https://arxiv.org/abs/2004.06465)". Accepted at ECML-PKDD 2020.

***Please cite our paper in any published work that uses any of these resources.***

~~~
@article{aluru2020deep,
  title={Deep Learning Models for Multilingual Hate Speech Detection},
  author={Aluru, Sai Saket and Mathew, Binny and Saha, Punyajoy and Mukherjee, Animesh},
  journal={arXiv preprint arXiv:2004.06465},
  year={2020}
}
~~~
Aran/DialoGPT-medium-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
language: de
license: apache-2.0
---

This model is used for detecting **hate speech** in the **German language**. The "mono" in the name refers to the monolingual setting: the model is trained using only German-language data. It is fine-tuned from the multilingual BERT model. The model was trained with several learning rates, and the best validation score achieved is 0.649794, for a learning rate of 3e-5. Training code can be found at this [url](https://github.com/punyajoy/DE-LIMIT)

### For more details about our paper

Sai Saketh Aluru, Binny Mathew, Punyajoy Saha and Animesh Mukherjee. "[Deep Learning Models for Multilingual Hate Speech Detection](https://arxiv.org/abs/2004.06465)". Accepted at ECML-PKDD 2020.

***Please cite our paper in any published work that uses any of these resources.***

~~~
@article{aluru2020deep,
  title={Deep Learning Models for Multilingual Hate Speech Detection},
  author={Aluru, Sai Saket and Mathew, Binny and Saha, Punyajoy and Mukherjee, Animesh},
  journal={arXiv preprint arXiv:2004.06465},
  year={2020}
}
~~~
ArashEsk95/bert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language: it
license: apache-2.0
---

This model is used for detecting **hate speech** in the **Italian language**. The "mono" in the name refers to the monolingual setting: the model is trained using only Italian-language data. It is fine-tuned from the multilingual BERT model. The model was trained with several learning rates, and the best validation score achieved is 0.837288, for a learning rate of 3e-5. Training code can be found at this [url](https://github.com/punyajoy/DE-LIMIT)

### For more details about our paper

Sai Saketh Aluru, Binny Mathew, Punyajoy Saha and Animesh Mukherjee. "[Deep Learning Models for Multilingual Hate Speech Detection](https://arxiv.org/abs/2004.06465)". Accepted at ECML-PKDD 2020.

***Please cite our paper in any published work that uses any of these resources.***

~~~
@article{aluru2020deep,
  title={Deep Learning Models for Multilingual Hate Speech Detection},
  author={Aluru, Sai Saket and Mathew, Binny and Saha, Punyajoy and Mukherjee, Animesh},
  journal={arXiv preprint arXiv:2004.06465},
  year={2020}
}
~~~
ArpanZS/debug_squad
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- language: - vi - km - aav - en tags: - translation license: apache-2.0 --- ### aav-eng * source group: Austro-Asiatic languages * target group: English * OPUS readme: [aav-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/aav-eng/README.md) * model: transformer * source language(s): hoc hoc_Latn kha khm khm_Latn mnw vie vie_Hani * target language(s): eng * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus2m-2020-07-31.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/aav-eng/opus2m-2020-07-31.zip) * test set translations: [opus2m-2020-07-31.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/aav-eng/opus2m-2020-07-31.test.txt) * test set scores: [opus2m-2020-07-31.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/aav-eng/opus2m-2020-07-31.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.hoc-eng.hoc.eng | 0.3 | 0.095 | | Tatoeba-test.kha-eng.kha.eng | 1.0 | 0.115 | | Tatoeba-test.khm-eng.khm.eng | 8.9 | 0.271 | | Tatoeba-test.mnw-eng.mnw.eng | 0.8 | 0.118 | | Tatoeba-test.multi.eng | 24.8 | 0.391 | | Tatoeba-test.vie-eng.vie.eng | 38.7 | 0.567 | ### System Info: - hf_name: aav-eng - source_languages: aav - target_languages: eng - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/aav-eng/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['vi', 'km', 'aav', 'en'] - src_constituents: {'mnw', 'vie', 'kha', 'khm', 'vie_Hani', 'khm_Latn', 'hoc_Latn', 'hoc'} - tgt_constituents: {'eng'} - src_multilingual: True - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/aav-eng/opus2m-2020-07-31.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/aav-eng/opus2m-2020-07-31.test.txt - src_alpha3: aav - tgt_alpha3: eng - short_pair: aav-en - chrF2_score: 0.391 - bleu: 24.8 - brevity_penalty: 0.968 - ref_len: 36693.0 - src_name: Austro-Asiatic languages - tgt_name: English - train_date: 2020-07-31 - src_alpha2: aav - tgt_alpha2: en - prefer_old: False - long_pair: aav-eng - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
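These OPUS-MT cards describe Marian NMT checkpoints, which `transformers` exposes through the Marian classes. A minimal translation sketch, assuming the usual `Helsinki-NLP/opus-mt-<short_pair>` naming (here `aav-en`, taken from the card's `short_pair` field):

```python
# Hedged sketch: translating with an OPUS-MT Marian checkpoint.
# The repo ID follows the usual Helsinki-NLP naming convention (an assumption).
from transformers import MarianMTModel, MarianTokenizer

MODEL_ID = "Helsinki-NLP/opus-mt-aav-en"  # short_pair "aav-en" from the card
tokenizer = MarianTokenizer.from_pretrained(MODEL_ID)
model = MarianMTModel.from_pretrained(MODEL_ID)

src = ["Tôi thích học ngôn ngữ."]  # Vietnamese, one of the listed source languages
batch = tokenizer(src, return_tensors="pt", padding=True)
translated = model.generate(**batch)
print(tokenizer.batch_decode(translated, skip_special_tokens=True))
```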
AryanLala/autonlp-Scientific_Title_Generator-34558227
[ "pytorch", "pegasus", "text2text-generation", "en", "dataset:AryanLala/autonlp-data-Scientific_Title_Generator", "transformers", "autonlp", "co2_eq_emissions", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
103
null
--- language: - so - ti - am - he - mt - ar - afa tags: - translation license: apache-2.0 --- ### afa-afa * source group: Afro-Asiatic languages * target group: Afro-Asiatic languages * OPUS readme: [afa-afa](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-afa/README.md) * model: transformer * source language(s): apc ara arq arz heb kab mlt shy_Latn thv * target language(s): apc ara arq arz heb kab mlt shy_Latn thv * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID) * download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.zip) * test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.test.txt) * test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara-ara.ara.ara | 4.3 | 0.148 | | Tatoeba-test.ara-heb.ara.heb | 31.9 | 0.525 | | Tatoeba-test.ara-kab.ara.kab | 0.3 | 0.120 | | Tatoeba-test.ara-mlt.ara.mlt | 14.0 | 0.428 | | Tatoeba-test.ara-shy.ara.shy | 1.3 | 0.050 | | Tatoeba-test.heb-ara.heb.ara | 17.0 | 0.464 | | Tatoeba-test.heb-kab.heb.kab | 1.9 | 0.104 | | Tatoeba-test.kab-ara.kab.ara | 0.3 | 0.044 | | Tatoeba-test.kab-heb.kab.heb | 5.1 | 0.099 | | Tatoeba-test.kab-shy.kab.shy | 2.2 | 0.009 | | Tatoeba-test.kab-tmh.kab.tmh | 10.7 | 0.007 | | Tatoeba-test.mlt-ara.mlt.ara | 29.1 | 0.498 | | Tatoeba-test.multi.multi | 20.8 | 0.434 | | Tatoeba-test.shy-ara.shy.ara | 1.2 | 0.053 | | Tatoeba-test.shy-kab.shy.kab | 2.0 | 0.134 | | Tatoeba-test.tmh-kab.tmh.kab | 0.0 | 0.047 | ### System Info: - hf_name: afa-afa - source_languages: afa - target_languages: afa - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-afa/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['so', 'ti', 'am', 'he', 'mt', 'ar', 'afa'] - src_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - tgt_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - src_multilingual: True - tgt_multilingual: True - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.test.txt - src_alpha3: afa - tgt_alpha3: afa - short_pair: afa-afa - chrF2_score: 0.434 - bleu: 20.8 - brevity_penalty: 1.0 - ref_len: 15215.0 - src_name: Afro-Asiatic languages - tgt_name: Afro-Asiatic languages - train_date: 2020-07-26 - src_alpha2: afa - tgt_alpha2: afa - prefer_old: False - long_pair: afa-afa - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
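Because this model is multilingual on the target side, the `>>id<<` requirement noted above matters in practice: every source sentence must begin with a valid target-language token. A minimal sketch, with the repo ID assumed from the card name:

```python
# Hedged sketch: selecting the target language of a multi-target OPUS-MT model
# with the ">>id<<" prefix the card requires. The repo ID is an assumption.
from transformers import MarianMTModel, MarianTokenizer

MODEL_ID = "Helsinki-NLP/opus-mt-afa-afa"  # assumed from the card name
tokenizer = MarianTokenizer.from_pretrained(MODEL_ID)
model = MarianMTModel.from_pretrained(MODEL_ID)

# Arabic source sentence, routed to Hebrew via the ">>heb<<" target token.
src = [">>heb<< صباح الخير"]
batch = tokenizer(src, return_tensors="pt", padding=True)
print(tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True))
```

Omitting the token, or using an ID outside the card's target-language list, typically yields degenerate output.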
AshLukass/AshLukass
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - so - ti - am - he - mt - ar - afa - en tags: - translation license: apache-2.0 --- ### afa-eng * source group: Afro-Asiatic languages * target group: English * OPUS readme: [afa-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-eng/README.md) * model: transformer * source language(s): acm afb amh apc ara arq ary arz hau_Latn heb kab mlt rif_Latn shy_Latn som tir * target language(s): eng * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus2m-2020-07-31.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-eng/opus2m-2020-07-31.zip) * test set translations: [opus2m-2020-07-31.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-eng/opus2m-2020-07-31.test.txt) * test set scores: [opus2m-2020-07-31.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-eng/opus2m-2020-07-31.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.amh-eng.amh.eng | 35.9 | 0.550 | | Tatoeba-test.ara-eng.ara.eng | 36.6 | 0.543 | | Tatoeba-test.hau-eng.hau.eng | 11.9 | 0.327 | | Tatoeba-test.heb-eng.heb.eng | 42.7 | 0.591 | | Tatoeba-test.kab-eng.kab.eng | 4.3 | 0.213 | | Tatoeba-test.mlt-eng.mlt.eng | 44.3 | 0.618 | | Tatoeba-test.multi.eng | 27.1 | 0.464 | | Tatoeba-test.rif-eng.rif.eng | 3.5 | 0.141 | | Tatoeba-test.shy-eng.shy.eng | 0.6 | 0.125 | | Tatoeba-test.som-eng.som.eng | 23.6 | 0.472 | | Tatoeba-test.tir-eng.tir.eng | 13.1 | 0.328 | ### System Info: - hf_name: afa-eng - source_languages: afa - target_languages: eng - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-eng/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['so', 'ti', 'am', 'he', 'mt', 'ar', 'afa', 'en'] - src_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - tgt_constituents: {'eng'} - src_multilingual: True - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-eng/opus2m-2020-07-31.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-eng/opus2m-2020-07-31.test.txt - src_alpha3: afa - tgt_alpha3: eng - short_pair: afa-en - chrF2_score: 0.46399999999999997 - bleu: 27.1 - brevity_penalty: 1.0 - ref_len: 69373.0 - src_name: Afro-Asiatic languages - tgt_name: English - train_date: 2020-07-31 - src_alpha2: afa - tgt_alpha2: en - prefer_old: False - long_pair: afa-eng - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Ashagi/Ashvx
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - sn - rw - wo - ig - sg - ee - zu - lg - ts - ln - ny - yo - rn - xh - alv - en tags: - translation license: apache-2.0 --- ### alv-eng * source group: Atlantic-Congo languages * target group: English * OPUS readme: [alv-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/alv-eng/README.md) * model: transformer * source language(s): ewe fuc fuv ibo kin lin lug nya run sag sna swh toi_Latn tso umb wol xho yor zul * target language(s): eng * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus2m-2020-07-31.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/alv-eng/opus2m-2020-07-31.zip) * test set translations: [opus2m-2020-07-31.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/alv-eng/opus2m-2020-07-31.test.txt) * test set scores: [opus2m-2020-07-31.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/alv-eng/opus2m-2020-07-31.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ewe-eng.ewe.eng | 6.3 | 0.328 | | Tatoeba-test.ful-eng.ful.eng | 0.4 | 0.108 | | Tatoeba-test.ibo-eng.ibo.eng | 4.5 | 0.196 | | Tatoeba-test.kin-eng.kin.eng | 30.7 | 0.511 | | Tatoeba-test.lin-eng.lin.eng | 2.8 | 0.213 | | Tatoeba-test.lug-eng.lug.eng | 3.4 | 0.140 | | Tatoeba-test.multi.eng | 20.9 | 0.376 | | Tatoeba-test.nya-eng.nya.eng | 38.7 | 0.492 | | Tatoeba-test.run-eng.run.eng | 24.5 | 0.417 | | Tatoeba-test.sag-eng.sag.eng | 5.5 | 0.177 | | Tatoeba-test.sna-eng.sna.eng | 26.9 | 0.412 | | Tatoeba-test.swa-eng.swa.eng | 4.9 | 0.196 | | Tatoeba-test.toi-eng.toi.eng | 3.9 | 0.147 | | Tatoeba-test.tso-eng.tso.eng | 76.7 | 0.957 | | Tatoeba-test.umb-eng.umb.eng | 4.0 | 0.195 | | Tatoeba-test.wol-eng.wol.eng | 3.7 | 0.170 | | Tatoeba-test.xho-eng.xho.eng | 38.9 | 0.556 | | Tatoeba-test.yor-eng.yor.eng | 25.1 | 0.412 | | Tatoeba-test.zul-eng.zul.eng | 46.1 | 0.623 | ### System Info: - hf_name: alv-eng - source_languages: alv - target_languages: eng - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/alv-eng/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['sn', 'rw', 'wo', 'ig', 'sg', 'ee', 'zu', 'lg', 'ts', 'ln', 'ny', 'yo', 'rn', 'xh', 'alv', 'en'] - src_constituents: {'sna', 'kin', 'wol', 'ibo', 'swh', 'sag', 'ewe', 'zul', 'fuc', 'lug', 'tso', 'lin', 'nya', 'yor', 'run', 'xho', 'fuv', 'toi_Latn', 'umb'} - tgt_constituents: {'eng'} - src_multilingual: True - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/alv-eng/opus2m-2020-07-31.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/alv-eng/opus2m-2020-07-31.test.txt - src_alpha3: alv - tgt_alpha3: eng - short_pair: alv-en - chrF2_score: 0.376 - bleu: 20.9 - brevity_penalty: 1.0 - ref_len: 15208.0 - src_name: Atlantic-Congo languages - tgt_name: English - train_date: 2020-07-31 - src_alpha2: alv - tgt_alpha2: en - prefer_old: False - long_pair: alv-eng - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Ashim/dga-transformer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ar - de tags: - translation license: apache-2.0 --- ### ara-deu * source group: Arabic * target group: German * OPUS readme: [ara-deu](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-deu/README.md) * model: transformer-align * source language(s): afb apc ara ara_Latn arq arz * target language(s): deu * model: transformer-align * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-deu/opus-2020-07-03.zip) * test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-deu/opus-2020-07-03.test.txt) * test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-deu/opus-2020-07-03.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara.deu | 44.7 | 0.629 | ### System Info: - hf_name: ara-deu - source_languages: ara - target_languages: deu - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-deu/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['ar', 'de'] - src_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'} - tgt_constituents: {'deu'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-deu/opus-2020-07-03.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-deu/opus-2020-07-03.test.txt - src_alpha3: ara - tgt_alpha3: deu - short_pair: ar-de - chrF2_score: 0.629 - bleu: 44.7 - brevity_penalty: 0.986 - ref_len: 8371.0 - src_name: Arabic - tgt_name: German - train_date: 2020-07-03 - src_alpha2: ar - tgt_alpha2: de - prefer_old: False - long_pair: ara-deu - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Ashkanmh/bert-base-parsbert-uncased-finetuned
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
"2020-08-19T00:05:47Z"
--- language: - ar - el tags: - translation license: apache-2.0 --- ### ara-ell * source group: Arabic * target group: Modern Greek (1453-) * OPUS readme: [ara-ell](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-ell/README.md) * model: transformer-align * source language(s): ara arz * target language(s): ell * model: transformer-align * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ell/opus-2020-07-03.zip) * test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ell/opus-2020-07-03.test.txt) * test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ell/opus-2020-07-03.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara.ell | 43.9 | 0.636 | ### System Info: - hf_name: ara-ell - source_languages: ara - target_languages: ell - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-ell/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['ar', 'el'] - src_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'} - tgt_constituents: {'ell'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ell/opus-2020-07-03.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ell/opus-2020-07-03.test.txt - src_alpha3: ara - tgt_alpha3: ell - short_pair: ar-el - chrF2_score: 0.636 - bleu: 43.9 - brevity_penalty: 0.993 - ref_len: 2009.0 - src_name: Arabic - tgt_name: Modern Greek (1453-) - train_date: 2020-07-03 - src_alpha2: ar - tgt_alpha2: el - prefer_old: False - long_pair: ara-ell - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Ashok/my-new-tokenizer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ar - eo tags: - translation license: apache-2.0 --- ### ara-epo * source group: Arabic * target group: Esperanto * OPUS readme: [ara-epo](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-epo/README.md) * model: transformer-align * source language(s): apc apc_Latn ara arq arq_Latn arz * target language(s): epo * model: transformer-align * pre-processing: normalization + SentencePiece (spm4k,spm4k) * download original weights: [opus-2020-06-16.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-epo/opus-2020-06-16.zip) * test set translations: [opus-2020-06-16.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-epo/opus-2020-06-16.test.txt) * test set scores: [opus-2020-06-16.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-epo/opus-2020-06-16.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara.epo | 18.9 | 0.376 | ### System Info: - hf_name: ara-epo - source_languages: ara - target_languages: epo - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-epo/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['ar', 'eo'] - src_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'} - tgt_constituents: {'epo'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm4k,spm4k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-epo/opus-2020-06-16.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-epo/opus-2020-06-16.test.txt - src_alpha3: ara - tgt_alpha3: epo - short_pair: ar-eo - chrF2_score: 0.376 - bleu: 18.9 - brevity_penalty: 0.948 - ref_len: 4506.0 - src_name: Arabic - tgt_name: Esperanto - train_date: 2020-06-16 - src_alpha2: ar - tgt_alpha2: eo - prefer_old: False - long_pair: ara-epo - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
AshtonBenson/DialoGPT-small-quentin-coldwater
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ar - es tags: - translation license: apache-2.0 --- ### ara-spa * source group: Arabic * target group: Spanish * OPUS readme: [ara-spa](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-spa/README.md) * model: transformer * source language(s): apc apc_Latn ara arq * target language(s): spa * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-spa/opus-2020-07-03.zip) * test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-spa/opus-2020-07-03.test.txt) * test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-spa/opus-2020-07-03.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara.spa | 46.0 | 0.641 | ### System Info: - hf_name: ara-spa - source_languages: ara - target_languages: spa - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-spa/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['ar', 'es'] - src_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'} - tgt_constituents: {'spa'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-spa/opus-2020-07-03.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-spa/opus-2020-07-03.test.txt - src_alpha3: ara - tgt_alpha3: spa - short_pair: ar-es - chrF2_score: 0.6409999999999999 - bleu: 46.0 - brevity_penalty: 0.9620000000000001 - ref_len: 9708.0 - src_name: Arabic - tgt_name: Spanish - train_date: 2020-07-03 - src_alpha2: ar - tgt_alpha2: es - prefer_old: False - long_pair: ara-spa - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
AshtonBenson/DialoGPT-small-quentin
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - translation license: apache-2.0 --- ### opus-mt-ar-fr * source languages: ar * target languages: fr * OPUS readme: [ar-fr](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ar-fr/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-24.zip](https://object.pouta.csc.fi/OPUS-MT-models/ar-fr/opus-2020-01-24.zip) * test set translations: [opus-2020-01-24.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ar-fr/opus-2020-01-24.test.txt) * test set scores: [opus-2020-01-24.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ar-fr/opus-2020-01-24.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba.ar.fr | 43.5 | 0.602 |
Aspect11/DialoGPT-Medium-LiSBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - ar - he tags: - translation license: apache-2.0 --- ### ara-heb * source group: Arabic * target group: Hebrew * OPUS readme: [ara-heb](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-heb/README.md) * model: transformer * source language(s): apc apc_Latn ara arq arz * target language(s): heb * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-heb/opus-2020-07-03.zip) * test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-heb/opus-2020-07-03.test.txt) * test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-heb/opus-2020-07-03.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara.heb | 40.4 | 0.605 | ### System Info: - hf_name: ara-heb - source_languages: ara - target_languages: heb - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-heb/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['ar', 'he'] - src_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'} - tgt_constituents: {'heb'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-heb/opus-2020-07-03.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-heb/opus-2020-07-03.test.txt - src_alpha3: ara - tgt_alpha3: heb - short_pair: ar-he - chrF2_score: 0.605 - bleu: 40.4 - brevity_penalty: 1.0 - ref_len: 6801.0 - src_name: Arabic - tgt_name: Hebrew - train_date: 2020-07-03 - src_alpha2: ar - tgt_alpha2: he - prefer_old: False - long_pair: ara-heb - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Asuramaru/DialoGPT-small-rintohsaka
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - ar - it tags: - translation license: apache-2.0 --- ### ara-ita * source group: Arabic * target group: Italian * OPUS readme: [ara-ita](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-ita/README.md) * model: transformer * source language(s): ara * target language(s): ita * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ita/opus-2020-07-03.zip) * test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ita/opus-2020-07-03.test.txt) * test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ita/opus-2020-07-03.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara.ita | 44.2 | 0.658 | ### System Info: - hf_name: ara-ita - source_languages: ara - target_languages: ita - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ara-ita/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['ar', 'it'] - src_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'} - tgt_constituents: {'ita'} - src_multilingual: False - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ita/opus-2020-07-03.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ara-ita/opus-2020-07-03.test.txt - src_alpha3: ara - tgt_alpha3: ita - short_pair: ar-it - chrF2_score: 0.6579999999999999 - bleu: 44.2 - brevity_penalty: 0.9890000000000001 - ref_len: 1495.0 - src_name: Arabic - tgt_name: Italian - train_date: 2020-07-03 - src_alpha2: ar - tgt_alpha2: it - prefer_old: False - long_pair: ara-ita - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Atchuth/DialoGPT-small-MBOT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - eo - io - art - en tags: - translation license: apache-2.0 --- ### art-eng * source group: Artificial languages * target group: English * OPUS readme: [art-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/art-eng/README.md) * model: transformer * source language(s): afh_Latn avk_Latn dws_Latn epo ido ido_Latn ile_Latn ina_Latn jbo jbo_Cyrl jbo_Latn ldn_Latn lfn_Cyrl lfn_Latn nov_Latn qya qya_Latn sjn_Latn tlh_Latn tzl tzl_Latn vol_Latn * target language(s): eng * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus2m-2020-07-31.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/art-eng/opus2m-2020-07-31.zip) * test set translations: [opus2m-2020-07-31.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/art-eng/opus2m-2020-07-31.test.txt) * test set scores: [opus2m-2020-07-31.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/art-eng/opus2m-2020-07-31.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.afh-eng.afh.eng | 1.2 | 0.099 | | Tatoeba-test.avk-eng.avk.eng | 0.4 | 0.105 | | Tatoeba-test.dws-eng.dws.eng | 1.6 | 0.076 | | Tatoeba-test.epo-eng.epo.eng | 34.6 | 0.530 | | Tatoeba-test.ido-eng.ido.eng | 12.7 | 0.310 | | Tatoeba-test.ile-eng.ile.eng | 4.6 | 0.218 | | Tatoeba-test.ina-eng.ina.eng | 5.8 | 0.254 | | Tatoeba-test.jbo-eng.jbo.eng | 0.2 | 0.115 | | Tatoeba-test.ldn-eng.ldn.eng | 0.7 | 0.083 | | Tatoeba-test.lfn-eng.lfn.eng | 1.8 | 0.172 | | Tatoeba-test.multi.eng | 11.6 | 0.287 | | Tatoeba-test.nov-eng.nov.eng | 5.1 | 0.215 | | Tatoeba-test.qya-eng.qya.eng | 0.7 | 0.113 | | Tatoeba-test.sjn-eng.sjn.eng | 0.9 | 0.090 | | Tatoeba-test.tlh-eng.tlh.eng | 0.2 | 0.124 | | Tatoeba-test.tzl-eng.tzl.eng | 1.4 | 0.109 | | Tatoeba-test.vol-eng.vol.eng | 0.5 | 0.115 | ### System Info: - hf_name: art-eng - source_languages: art - target_languages: eng - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/art-eng/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['eo', 'io', 'art', 'en'] - src_constituents: {'sjn_Latn', 'tzl', 'vol_Latn', 'qya', 'tlh_Latn', 'ile_Latn', 'ido_Latn', 'tzl_Latn', 'jbo_Cyrl', 'jbo', 'lfn_Latn', 'nov_Latn', 'dws_Latn', 'ldn_Latn', 'avk_Latn', 'lfn_Cyrl', 'ina_Latn', 'jbo_Latn', 'epo', 'afh_Latn', 'qya_Latn', 'ido'} - tgt_constituents: {'eng'} - src_multilingual: True - tgt_multilingual: False - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/art-eng/opus2m-2020-07-31.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/art-eng/opus2m-2020-07-31.test.txt - src_alpha3: art - tgt_alpha3: eng - short_pair: art-en - chrF2_score: 0.287 - bleu: 11.6 - brevity_penalty: 1.0 - ref_len: 73037.0 - src_name: Artificial languages - tgt_name: English - train_date: 2020-07-31 - src_alpha2: art - tgt_alpha2: en - prefer_old: False - long_pair: art-eng - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
Atchuth/DialoGPT-small-MichaelBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - translation license: apache-2.0 --- ### opus-mt-ase-de * source languages: ase * target languages: de * OPUS readme: [ase-de](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ase-de/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/ase-de/opus-2020-01-20.zip) * test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ase-de/opus-2020-01-20.test.txt) * test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ase-de/opus-2020-01-20.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.ase.de | 27.2 | 0.478 |
Atchuth/MBOT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-04-29T00:58:01Z"
--- tags: - translation license: apache-2.0 --- ### opus-mt-ase-en * source languages: ase * target languages: en * OPUS readme: [ase-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ase-en/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/ase-en/opus-2020-01-20.zip) * test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ase-en/opus-2020-01-20.test.txt) * test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ase-en/opus-2020-01-20.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.ase.en | 99.5 | 0.997 |
Augustvember/WokkaBot7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - translation license: apache-2.0 --- ### opus-mt-bem-fi * source languages: bem * target languages: fi * OPUS readme: [bem-fi](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/bem-fi/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/bem-fi/opus-2020-01-08.zip) * test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/bem-fi/opus-2020-01-08.test.txt) * test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/bem-fi/opus-2020-01-08.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.bem.fi | 22.8 | 0.439 |
Augustvember/WokkaBot8
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - translation license: apache-2.0 --- ### opus-mt-bem-fr * source languages: bem * target languages: fr * OPUS readme: [bem-fr](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/bem-fr/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/bem-fr/opus-2020-01-08.zip) * test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/bem-fr/opus-2020-01-08.test.txt) * test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/bem-fr/opus-2020-01-08.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.bem.fr | 25.0 | 0.417 |
Augustvember/WokkaBot9
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-bem-sv

* source languages: bem
* target languages: sv
* OPUS readme: [bem-sv](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/bem-sv/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/bem-sv/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/bem-sv/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/bem-sv/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.bem.sv | 25.6 | 0.434 |
Augustvember/WokkaBotF
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-ber-es

* source languages: ber
* target languages: es
* OPUS readme: [ber-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ber-es/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-15.zip](https://object.pouta.csc.fi/OPUS-MT-models/ber-es/opus-2020-01-15.zip)
* test set translations: [opus-2020-01-15.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ber-es/opus-2020-01-15.test.txt)
* test set scores: [opus-2020-01-15.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ber-es/opus-2020-01-15.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.ber.es | 33.8 | 0.487 |
Augustvember/test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-ber-fr

* source languages: ber
* target languages: fr
* OPUS readme: [ber-fr](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ber-fr/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/ber-fr/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ber-fr/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ber-fr/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.ber.fr | 60.2 | 0.754 |
Augustvember/wokka
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
language:
- bg
- de
tags:
- translation
license: apache-2.0
---

### bul-deu

* source group: Bulgarian
* target group: German
* OPUS readme: [bul-deu](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/bul-deu/README.md)
* model: transformer
* source language(s): bul
* target language(s): deu
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-deu/opus-2020-07-03.zip)
* test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-deu/opus-2020-07-03.test.txt)
* test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-deu/opus-2020-07-03.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.bul.deu | 49.3 | 0.676 |

### System Info:
- hf_name: bul-deu
- source_languages: bul
- target_languages: deu
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/bul-deu/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['bg', 'de']
- src_constituents: {'bul', 'bul_Latn'}
- tgt_constituents: {'deu'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/bul-deu/opus-2020-07-03.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/bul-deu/opus-2020-07-03.test.txt
- src_alpha3: bul
- tgt_alpha3: deu
- short_pair: bg-de
- chrF2_score: 0.6759999999999999
- bleu: 49.3
- brevity_penalty: 1.0
- ref_len: 2218.0
- src_name: Bulgarian
- tgt_name: German
- train_date: 2020-07-03
- src_alpha2: bg
- tgt_alpha2: de
- prefer_old: False
- long_pair: bul-deu
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
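The BLEU and chr-F figures in these cards are corpus-level scores over the linked test-set files. A hedged sketch of recomputing such numbers with `sacrebleu`; the one-segment-per-line file layout is an assumption, since the actual OPUS eval files bundle source, reference, and hypothesis together:

```python
# Sketch: recompute corpus BLEU and chrF with sacrebleu. File names and
# layout (hypotheses and references in separate line-aligned files) are
# assumptions about how the OPUS test-set data was unpacked.
import sacrebleu

with open("hyp.txt", encoding="utf-8") as f:
    hypotheses = [line.strip() for line in f]
with open("ref.txt", encoding="utf-8") as f:
    references = [line.strip() for line in f]

bleu = sacrebleu.corpus_bleu(hypotheses, [references])
chrf = sacrebleu.corpus_chrf(hypotheses, [references])
# sacrebleu reports chrF on a 0-100 scale; the cards print a 0-1 fraction.
print(f"BLEU = {bleu.score:.1f}, chr-F = {chrf.score / 100:.3f}")
```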
Augustvember/wokka4
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- bg
- eo
tags:
- translation
license: apache-2.0
---

### bul-epo

* source group: Bulgarian
* target group: Esperanto
* OPUS readme: [bul-epo](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/bul-epo/README.md)
* model: transformer-align
* source language(s): bul
* target language(s): epo
* model: transformer-align
* pre-processing: normalization + SentencePiece (spm4k,spm4k)
* download original weights: [opus-2020-06-16.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-epo/opus-2020-06-16.zip)
* test set translations: [opus-2020-06-16.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-epo/opus-2020-06-16.test.txt)
* test set scores: [opus-2020-06-16.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-epo/opus-2020-06-16.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.bul.epo | 24.5 | 0.438 |

### System Info:
- hf_name: bul-epo
- source_languages: bul
- target_languages: epo
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/bul-epo/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['bg', 'eo']
- src_constituents: {'bul', 'bul_Latn'}
- tgt_constituents: {'epo'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm4k,spm4k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/bul-epo/opus-2020-06-16.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/bul-epo/opus-2020-06-16.test.txt
- src_alpha3: bul
- tgt_alpha3: epo
- short_pair: bg-eo
- chrF2_score: 0.43799999999999994
- bleu: 24.5
- brevity_penalty: 0.9670000000000001
- ref_len: 4043.0
- src_name: Bulgarian
- tgt_name: Esperanto
- train_date: 2020-06-16
- src_alpha2: bg
- tgt_alpha2: eo
- prefer_old: False
- long_pair: bul-epo
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Augustvember/wokka5
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
---
language:
- bg
- es
tags:
- translation
license: apache-2.0
---

### bul-spa

* source group: Bulgarian
* target group: Spanish
* OPUS readme: [bul-spa](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/bul-spa/README.md)
* model: transformer
* source language(s): bul
* target language(s): spa
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-spa/opus-2020-07-03.zip)
* test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-spa/opus-2020-07-03.test.txt)
* test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/bul-spa/opus-2020-07-03.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.bul.spa | 49.1 | 0.661 |

### System Info:
- hf_name: bul-spa
- source_languages: bul
- target_languages: spa
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/bul-spa/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['bg', 'es']
- src_constituents: {'bul', 'bul_Latn'}
- tgt_constituents: {'spa'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/bul-spa/opus-2020-07-03.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/bul-spa/opus-2020-07-03.test.txt
- src_alpha3: bul
- tgt_alpha3: spa
- short_pair: bg-es
- chrF2_score: 0.6609999999999999
- bleu: 49.1
- brevity_penalty: 0.992
- ref_len: 1783.0
- src_name: Bulgarian
- tgt_name: Spanish
- train_date: 2020-07-03
- src_alpha2: bg
- tgt_alpha2: es
- prefer_old: False
- long_pair: bul-spa
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Augustvember/wokkabottest2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
"2020-04-29T01:04:02Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-bg-fi

* source languages: bg
* target languages: fi
* OPUS readme: [bg-fi](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/bg-fi/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/bg-fi/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/bg-fi/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/bg-fi/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.bg.fi | 23.7 | 0.505 |
Aybars/ModelOnTquad
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-bzs-fi

* source languages: bzs
* target languages: fi
* OPUS readme: [bzs-fi](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/bzs-fi/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/bzs-fi/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/bzs-fi/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/bzs-fi/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.bzs.fi | 24.7 | 0.464 |
Ayham/albert_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
language:
- ca
- de
tags:
- translation
license: apache-2.0
---

### cat-deu

* source group: Catalan
* target group: German
* OPUS readme: [cat-deu](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/cat-deu/README.md)
* model: transformer-align
* source language(s): cat
* target language(s): deu
* model: transformer-align
* pre-processing: normalization + SentencePiece (spm12k,spm12k)
* download original weights: [opus-2020-06-16.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/cat-deu/opus-2020-06-16.zip)
* test set translations: [opus-2020-06-16.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/cat-deu/opus-2020-06-16.test.txt)
* test set scores: [opus-2020-06-16.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/cat-deu/opus-2020-06-16.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.cat.deu | 39.5 | 0.593 |

### System Info:
- hf_name: cat-deu
- source_languages: cat
- target_languages: deu
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/cat-deu/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['ca', 'de']
- src_constituents: {'cat'}
- tgt_constituents: {'deu'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm12k,spm12k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/cat-deu/opus-2020-06-16.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/cat-deu/opus-2020-06-16.test.txt
- src_alpha3: cat
- tgt_alpha3: deu
- short_pair: ca-de
- chrF2_score: 0.593
- bleu: 39.5
- brevity_penalty: 1.0
- ref_len: 5643.0
- src_name: Catalan
- tgt_name: German
- train_date: 2020-06-16
- src_alpha2: ca
- tgt_alpha2: de
- prefer_old: False
- long_pair: cat-deu
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
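Every card above names "normalization + SentencePiece" pre-processing. A sketch of the SentencePiece step in isolation, assuming `source.spm` is the subword model unpacked from the OPUS weights zip linked in the card:

```python
# Sketch: the SentencePiece segmentation named in the cards. "source.spm"
# is assumed to come from the downloaded OPUS weights archive.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="source.spm")
pieces = sp.encode("Bon dia a tothom.", out_type=str)  # sample Catalan input
print(pieces)
```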
Ayham/albert_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-ca-en

* source languages: ca
* target languages: en
* OPUS readme: [ca-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ca-en/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/ca-en/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ca-en/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ca-en/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.ca.en | 51.4 | 0.678 |
Ayham/roberta_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-crs-en

* source languages: crs
* target languages: en
* OPUS readme: [crs-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/crs-en/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/crs-en/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/crs-en/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/crs-en/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.crs.en | 42.9 | 0.589 |
Ayham/xlmroberta_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-cs-de

* source languages: cs
* target languages: de
* OPUS readme: [cs-de](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/cs-de/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/cs-de/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/cs-de/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/cs-de/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newssyscomb2009.cs.de | 22.0 | 0.525 |
| news-test2008.cs.de | 21.1 | 0.520 |
| newstest2009.cs.de | 22.2 | 0.525 |
| newstest2010.cs.de | 22.1 | 0.527 |
| newstest2011.cs.de | 21.6 | 0.515 |
| newstest2012.cs.de | 22.2 | 0.516 |
| newstest2013.cs.de | 24.8 | 0.538 |
| newstest2019-csde.cs.de | 23.6 | 0.530 |
| Tatoeba.cs.de | 51.6 | 0.687 |
Ayham/xlmroberta_large_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
"2020-04-29T01:14:34Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-cs-en

* source languages: cs
* target languages: en
* OPUS readme: [cs-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/cs-en/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/cs-en/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/cs-en/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/cs-en/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newstest2014-csen.cs.en | 34.1 | 0.612 |
| newstest2015-encs.cs.en | 30.4 | 0.565 |
| newstest2016-encs.cs.en | 31.8 | 0.584 |
| newstest2017-encs.cs.en | 28.7 | 0.556 |
| newstest2018-encs.cs.en | 30.3 | 0.566 |
| Tatoeba.cs.en | 58.0 | 0.721 |
Ayham/xlnet_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
"2020-08-19T00:08:04Z"
---
language:
- cs
- eo
tags:
- translation
license: apache-2.0
---

### ces-epo

* source group: Czech
* target group: Esperanto
* OPUS readme: [ces-epo](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ces-epo/README.md)
* model: transformer-align
* source language(s): ces
* target language(s): epo
* model: transformer-align
* pre-processing: normalization + SentencePiece (spm4k,spm4k)
* download original weights: [opus-2020-06-16.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/ces-epo/opus-2020-06-16.zip)
* test set translations: [opus-2020-06-16.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ces-epo/opus-2020-06-16.test.txt)
* test set scores: [opus-2020-06-16.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/ces-epo/opus-2020-06-16.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.ces.epo | 26.0 | 0.459 |

### System Info:
- hf_name: ces-epo
- source_languages: ces
- target_languages: epo
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/ces-epo/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['cs', 'eo']
- src_constituents: {'ces'}
- tgt_constituents: {'epo'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm4k,spm4k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/ces-epo/opus-2020-06-16.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/ces-epo/opus-2020-06-16.test.txt
- src_alpha3: ces
- tgt_alpha3: epo
- short_pair: cs-eo
- chrF2_score: 0.45899999999999996
- bleu: 26.0
- brevity_penalty: 0.94
- ref_len: 24901.0
- src_name: Czech
- tgt_name: Esperanto
- train_date: 2020-06-16
- src_alpha2: cs
- tgt_alpha2: eo
- prefer_old: False
- long_pair: ces-epo
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Ayham/xlnet_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-cs-fi

* source languages: cs
* target languages: fi
* OPUS readme: [cs-fi](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/cs-fi/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/cs-fi/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/cs-fi/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/cs-fi/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.cs.fi | 25.5 | 0.523 |
Ayoola/pytorch_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- da
- eo
tags:
- translation
license: apache-2.0
---

### dan-epo

* source group: Danish
* target group: Esperanto
* OPUS readme: [dan-epo](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/dan-epo/README.md)
* model: transformer-align
* source language(s): dan
* target language(s): epo
* model: transformer-align
* pre-processing: normalization + SentencePiece (spm4k,spm4k)
* download original weights: [opus-2020-06-16.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/dan-epo/opus-2020-06-16.zip)
* test set translations: [opus-2020-06-16.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/dan-epo/opus-2020-06-16.test.txt)
* test set scores: [opus-2020-06-16.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/dan-epo/opus-2020-06-16.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.dan.epo | 23.6 | 0.432 |

### System Info:
- hf_name: dan-epo
- source_languages: dan
- target_languages: epo
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/dan-epo/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['da', 'eo']
- src_constituents: {'dan'}
- tgt_constituents: {'epo'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm4k,spm4k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/dan-epo/opus-2020-06-16.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/dan-epo/opus-2020-06-16.test.txt
- src_alpha3: dan
- tgt_alpha3: epo
- short_pair: da-eo
- chrF2_score: 0.43200000000000005
- bleu: 23.6
- brevity_penalty: 0.9420000000000001
- ref_len: 69856.0
- src_name: Danish
- tgt_name: Esperanto
- train_date: 2020-06-16
- src_alpha2: da
- tgt_alpha2: eo
- prefer_old: False
- long_pair: dan-epo
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Ayumi/Jovana
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- de
- bg
tags:
- translation
license: apache-2.0
---

### deu-bul

* source group: German
* target group: Bulgarian
* OPUS readme: [deu-bul](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/deu-bul/README.md)
* model: transformer
* source language(s): deu
* target language(s): bul
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/deu-bul/opus-2020-07-03.zip)
* test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/deu-bul/opus-2020-07-03.test.txt)
* test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/deu-bul/opus-2020-07-03.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.deu.bul | 50.7 | 0.683 |

### System Info:
- hf_name: deu-bul
- source_languages: deu
- target_languages: bul
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/deu-bul/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['de', 'bg']
- src_constituents: {'deu'}
- tgt_constituents: {'bul', 'bul_Latn'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/deu-bul/opus-2020-07-03.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/deu-bul/opus-2020-07-03.test.txt
- src_alpha3: deu
- tgt_alpha3: bul
- short_pair: de-bg
- chrF2_score: 0.6829999999999999
- bleu: 50.7
- brevity_penalty: 0.98
- ref_len: 2032.0
- src_name: German
- tgt_name: Bulgarian
- train_date: 2020-07-03
- src_alpha2: de
- tgt_alpha2: bg
- prefer_old: False
- long_pair: deu-bul
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
AyushPJ/ai-club-inductions-21-nlp-ALBERT
[ "pytorch", "albert", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-bi

* source languages: de
* target languages: bi
* OPUS readme: [de-bi](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-bi/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-bi/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-bi/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-bi/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.bi | 25.7 | 0.450 |
AyushPJ/ai-club-inductions-21-nlp-distilBERT
[ "pytorch", "distilbert", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-crs

* source languages: de
* target languages: crs
* OPUS readme: [de-crs](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-crs/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-crs/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-crs/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-crs/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.crs | 24.1 | 0.429 |
AyushPJ/ai-club-inductions-21-nlp-roBERTa-base-squad-v2
[ "pytorch", "roberta", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-cs

* source languages: de
* target languages: cs
* OPUS readme: [de-cs](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-cs/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-cs/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-cs/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-cs/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newssyscomb2009.de.cs | 22.4 | 0.499 |
| news-test2008.de.cs | 20.2 | 0.487 |
| newstest2009.de.cs | 20.9 | 0.485 |
| newstest2010.de.cs | 22.7 | 0.510 |
| newstest2011.de.cs | 21.2 | 0.487 |
| newstest2012.de.cs | 20.9 | 0.479 |
| newstest2013.de.cs | 23.0 | 0.500 |
| newstest2019-decs.de.cs | 22.5 | 0.495 |
| Tatoeba.de.cs | 42.2 | 0.625 |
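Several rows in this stretch carry the `question-answering` pipeline tag (for example `AyushPJ/ai-club-inductions-21-nlp-roBERTa-base-squad-v2` above). A sketch of querying such a checkpoint through the generic pipeline API; the rows ship no usage notes, so treat this as an assumed, illustrative call:

```python
# Sketch: extractive QA via the generic transformers pipeline. Usage is
# assumed from the pipeline tag; the row itself documents nothing.
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="AyushPJ/ai-club-inductions-21-nlp-roBERTa-base-squad-v2",
)
result = qa(
    question="Which spans does the model predict?",
    context="Extractive QA models predict a start and an end token over the context.",
)
print(result)  # {"score": ..., "start": ..., "end": ..., "answer": ...}
```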
AyushPJ/test-squad-trained-finetuned-squad
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-de

* source languages: de
* target languages: de
* OPUS readme: [de-de](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-de/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-de/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-de/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-de/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.de.de | 40.7 | 0.616 |
Azaghast/GPT2-SCP-Descriptions
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-en

* source languages: de
* target languages: en
* OPUS readme: [de-en](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-en/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-02-26.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-en/opus-2020-02-26.zip)
* test set translations: [opus-2020-02-26.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-en/opus-2020-02-26.test.txt)
* test set scores: [opus-2020-02-26.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-en/opus-2020-02-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newssyscomb2009.de.en | 29.4 | 0.557 |
| news-test2008.de.en | 27.8 | 0.548 |
| newstest2009.de.en | 26.8 | 0.543 |
| newstest2010.de.en | 30.2 | 0.584 |
| newstest2011.de.en | 27.4 | 0.556 |
| newstest2012.de.en | 29.1 | 0.569 |
| newstest2013.de.en | 32.1 | 0.583 |
| newstest2014-deen.de.en | 34.0 | 0.600 |
| newstest2015-ende.de.en | 34.2 | 0.599 |
| newstest2016-ende.de.en | 40.4 | 0.649 |
| newstest2017-ende.de.en | 35.7 | 0.610 |
| newstest2018-ende.de.en | 43.7 | 0.667 |
| newstest2019-deen.de.en | 40.1 | 0.642 |
| Tatoeba.de.en | 55.4 | 0.707 |
Azaghast/GPT2-SCP-Miscellaneous
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-eo

* source languages: de
* target languages: eo
* OPUS readme: [de-eo](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-eo/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-eo/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-eo/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-eo/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.de.eo | 48.6 | 0.673 |
Azizun/Geotrend-10-epochs
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-es

* source languages: de
* target languages: es
* OPUS readme: [de-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-es/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-15.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-es/opus-2020-01-15.zip)
* test set translations: [opus-2020-01-15.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-es/opus-2020-01-15.test.txt)
* test set scores: [opus-2020-01-15.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-es/opus-2020-01-15.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.de.es | 48.5 | 0.676 |
Azura/data
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-et

* source languages: de
* target languages: et
* OPUS readme: [de-et](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-et/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-et/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-et/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-et/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.et | 20.2 | 0.465 |
Azuris/DialoGPT-medium-senorita
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-fi

* source languages: de
* target languages: fi
* OPUS readme: [de-fi](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-fi/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-fi/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-fi/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-fi/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.de.fi | 40.0 | 0.628 |
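The `conversational` rows here (such as `Azuris/DialoGPT-medium-senorita` above) set `task_specific_params.conversational.max_length` to 1000. A sketch of single-turn generation with such a checkpoint, assuming it follows the usual DialoGPT convention of EOS-separated turns — the rows themselves do not confirm this:

```python
# Sketch: one chat turn with a DialoGPT-style checkpoint. The EOS-separated
# turn format is an assumption carried over from DialoGPT conventions.
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "Azuris/DialoGPT-medium-senorita"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name)

input_ids = tokenizer.encode("Hello there!" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(
    input_ids,
    max_length=1000,  # matches the conversational max_length in the config column
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```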
Azuris/DialoGPT-small-envy
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-fj

* source languages: de
* target languages: fj
* OPUS readme: [de-fj](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-fj/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-fj/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-fj/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-fj/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.fj | 24.6 | 0.470 |
BOON/electra-xlnet
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-ha

* source languages: de
* target languages: ha
* OPUS readme: [de-ha](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-ha/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-ha/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-ha/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-ha/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.ha | 20.7 | 0.417 |
BSC-LT/RoBERTalex
[ "pytorch", "roberta", "fill-mask", "es", "dataset:legal_ES", "dataset:temu_legal", "arxiv:2110.12201", "transformers", "legal", "spanish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-hil

* source languages: de
* target languages: hil
* OPUS readme: [de-hil](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-hil/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-hil/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-hil/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-hil/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.hil | 33.9 | 0.563 |
BSC-LT/roberta-base-biomedical-clinical-es
[ "pytorch", "roberta", "fill-mask", "es", "arxiv:2109.03570", "arxiv:2109.07765", "transformers", "biomedical", "clinical", "spanish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-hr

* source languages: de
* target languages: hr
* OPUS readme: [de-hr](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-hr/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-26.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-hr/opus-2020-01-26.zip)
* test set translations: [opus-2020-01-26.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-hr/opus-2020-01-26.test.txt)
* test set scores: [opus-2020-01-26.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-hr/opus-2020-01-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.de.hr | 42.6 | 0.643 |
BSC-LT/roberta-base-bne-capitel-pos
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "pos", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-ilo

* source languages: de
* target languages: ilo
* OPUS readme: [de-ilo](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-ilo/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-ilo/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-ilo/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-ilo/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.ilo | 29.8 | 0.533 |
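For the POS-tagging row above (BSC-LT/roberta-base-bne-capitel-pos), a minimal token-classification sketch; the example sentence is illustrative:

```
from transformers import pipeline

# assumes the repo id from the row above
pos = pipeline("token-classification", model="BSC-LT/roberta-base-bne-capitel-pos")
print(pos("El bosque tiene árboles centenarios."))
```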
BSC-LT/roberta-large-bne-sqac
[ "pytorch", "roberta", "question-answering", "es", "dataset:BSC-TeMU/SQAC", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "qa", "question answering", "license:apache-2.0", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
"2020-04-29T01:30:59Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-loz

* source languages: de
* target languages: loz
* OPUS readme: [de-loz](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-loz/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-loz/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-loz/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-loz/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.loz | 27.7 | 0.480 |
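For the question-answering row above (BSC-LT/roberta-large-bne-sqac), a minimal sketch; question and context are illustrative:

```
from transformers import pipeline

# assumes the repo id from the row above
qa = pipeline("question-answering", model="BSC-LT/roberta-large-bne-sqac")
print(qa(question="¿Dónde vivo?", context="Me llamo Clara y vivo en Madrid."))
```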
Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition
[ "pytorch", "wav2vec2", "audio-classification", "ja", "dataset:jtes", "transformers", "audio", "speech", "speech-emotion-recognition", "has_space" ]
audio-classification
{ "architectures": [ "HubertForSequenceClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-de-pon

* source languages: de
* target languages: pon
* OPUS readme: [de-pon](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/de-pon/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-20.zip](https://object.pouta.csc.fi/OPUS-MT-models/de-pon/opus-2020-01-20.zip)
* test set translations: [opus-2020-01-20.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-pon/opus-2020-01-20.test.txt)
* test set scores: [opus-2020-01-20.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/de-pon/opus-2020-01-20.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.de.pon | 21.0 | 0.442 |
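For the audio-classification row above (Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition), a minimal sketch; `speech.wav` is a hypothetical local 16 kHz mono file, and decoding it requires ffmpeg:

```
from transformers import pipeline

# assumes the repo id from the row above
classifier = pipeline(
    "audio-classification",
    model="Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition",
)
print(classifier("speech.wav"))  # hypothetical local file
```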
Bala/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- de
- uk
tags:
- translation
license: apache-2.0
---

### deu-ukr

* source group: German
* target group: Ukrainian
* OPUS readme: [deu-ukr](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/deu-ukr/README.md)
* model: transformer-align
* source language(s): deu
* target language(s): ukr
* model: transformer-align
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus-2020-06-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/deu-ukr/opus-2020-06-17.zip)
* test set translations: [opus-2020-06-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/deu-ukr/opus-2020-06-17.test.txt)
* test set scores: [opus-2020-06-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/deu-ukr/opus-2020-06-17.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.deu.ukr | 40.2 | 0.612 |

### System Info:
- hf_name: deu-ukr
- source_languages: deu
- target_languages: ukr
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/deu-ukr/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['de', 'uk']
- src_constituents: {'deu'}
- tgt_constituents: {'ukr'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/deu-ukr/opus-2020-06-17.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/deu-ukr/opus-2020-06-17.test.txt
- src_alpha3: deu
- tgt_alpha3: ukr
- short_pair: de-uk
- chrF2_score: 0.612
- bleu: 40.2
- brevity_penalty: 0.9840000000000001
- ref_len: 54213.0
- src_name: German
- tgt_name: Ukrainian
- train_date: 2020-06-17
- src_alpha2: de
- tgt_alpha2: uk
- prefer_old: False
- long_pair: deu-ukr
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Balgow/prod_desc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- ta
- kn
- ml
- te
- dra
- en
tags:
- translation
license: apache-2.0
---

### dra-eng

* source group: Dravidian languages
* target group: English
* OPUS readme: [dra-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/dra-eng/README.md)
* model: transformer
* source language(s): kan mal tam tel
* target language(s): eng
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus2m-2020-07-31.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/dra-eng/opus2m-2020-07-31.zip)
* test set translations: [opus2m-2020-07-31.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/dra-eng/opus2m-2020-07-31.test.txt)
* test set scores: [opus2m-2020-07-31.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/dra-eng/opus2m-2020-07-31.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.kan-eng.kan.eng | 9.1 | 0.312 |
| Tatoeba-test.mal-eng.mal.eng | 42.0 | 0.584 |
| Tatoeba-test.multi.eng | 30.0 | 0.493 |
| Tatoeba-test.tam-eng.tam.eng | 30.2 | 0.467 |
| Tatoeba-test.tel-eng.tel.eng | 15.9 | 0.378 |

### System Info:
- hf_name: dra-eng
- source_languages: dra
- target_languages: eng
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/dra-eng/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['ta', 'kn', 'ml', 'te', 'dra', 'en']
- src_constituents: {'tam', 'kan', 'mal', 'tel'}
- tgt_constituents: {'eng'}
- src_multilingual: True
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/dra-eng/opus2m-2020-07-31.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/dra-eng/opus2m-2020-07-31.test.txt
- src_alpha3: dra
- tgt_alpha3: eng
- short_pair: dra-en
- chrF2_score: 0.493
- bleu: 30.0
- brevity_penalty: 1.0
- ref_len: 10641.0
- src_name: Dravidian languages
- tgt_name: English
- train_date: 2020-07-31
- src_alpha2: dra
- tgt_alpha2: en
- prefer_old: False
- long_pair: dra-eng
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Batsy24/DialoGPT-medium-Twilight_BellaBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-efi-sv

* source languages: efi
* target languages: sv
* OPUS readme: [efi-sv](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/efi-sv/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/efi-sv/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/efi-sv/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/efi-sv/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.efi.sv | 26.8 | 0.447 |
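For conversational rows such as Batsy24/DialoGPT-medium-Twilight_BellaBot above, a minimal single-turn sketch in the usual DialoGPT style; `max_length=1000` mirrors the conversational setting in that row's config:

```
from transformers import AutoModelForCausalLM, AutoTokenizer

# assumes the repo id from the conversational row above
tok = AutoTokenizer.from_pretrained("Batsy24/DialoGPT-medium-Twilight_BellaBot")
model = AutoModelForCausalLM.from_pretrained("Batsy24/DialoGPT-medium-Twilight_BellaBot")

# DialoGPT-style models expect dialogue turns joined by the EOS token
ids = tok.encode("Hello, who are you?" + tok.eos_token, return_tensors="pt")
reply = model.generate(ids, max_length=1000, pad_token_id=tok.eos_token_id)
print(tok.decode(reply[:, ids.shape[-1]:][0], skip_special_tokens=True))
```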
BatuhanYilmaz/mt5-small-finetuned-amazonbooks-en-es
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-08-19T00:10:31Z"
---
language:
- en
- ar
tags:
- translation
license: apache-2.0
---

### eng-ara

* source group: English
* target group: Arabic
* OPUS readme: [eng-ara](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-ara/README.md)
* model: transformer
* source language(s): eng
* target language(s): acm afb apc apc_Latn ara ara_Latn arq arq_Latn ary arz
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ara/opus-2020-07-03.zip)
* test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ara/opus-2020-07-03.test.txt)
* test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ara/opus-2020-07-03.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng.ara | 14.0 | 0.437 |

### System Info:
- hf_name: eng-ara
- source_languages: eng
- target_languages: ara
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-ara/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'ar']
- src_constituents: {'eng'}
- tgt_constituents: {'apc', 'ara', 'arq_Latn', 'arq', 'afb', 'ara_Latn', 'apc_Latn', 'arz'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ara/opus-2020-07-03.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ara/opus-2020-07-03.test.txt
- src_alpha3: eng
- tgt_alpha3: ara
- short_pair: en-ar
- chrF2_score: 0.43700000000000006
- bleu: 14.0
- brevity_penalty: 1.0
- ref_len: 58935.0
- src_name: English
- tgt_name: Arabic
- train_date: 2020-07-03
- src_alpha2: en
- tgt_alpha2: ar
- prefer_old: False
- long_pair: eng-ara
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
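The card above notes that a sentence-initial `>>id<<` token selects the target variant. A minimal sketch, assuming the checkpoint is published as `Helsinki-NLP/opus-mt-en-ar`:

```
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-ar"  # assumed Hub id for this checkpoint
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# prepend the target-language token (here Standard Arabic, ara) as the card requires
src = ">>ara<< How are you today?"
batch = tokenizer([src], return_tensors="pt")
print(tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True))
```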
Baybars/wav2vec2-xls-r-1b-turkish
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "common_voice", "generated_from_trainer" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
"2020-08-19T00:10:44Z"
---
language:
- en
- lt
- lv
- bat
tags:
- translation
license: apache-2.0
---

### eng-bat

* source group: English
* target group: Baltic languages
* OPUS readme: [eng-bat](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-bat/README.md)
* model: transformer
* source language(s): eng
* target language(s): lav lit ltg prg_Latn sgs
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus2m-2020-08-01.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bat/opus2m-2020-08-01.zip)
* test set translations: [opus2m-2020-08-01.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bat/opus2m-2020-08-01.test.txt)
* test set scores: [opus2m-2020-08-01.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bat/opus2m-2020-08-01.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newsdev2017-enlv-englav.eng.lav | 24.0 | 0.546 |
| newsdev2019-enlt-englit.eng.lit | 20.9 | 0.533 |
| newstest2017-enlv-englav.eng.lav | 18.3 | 0.506 |
| newstest2019-enlt-englit.eng.lit | 13.6 | 0.466 |
| Tatoeba-test.eng-lav.eng.lav | 42.8 | 0.652 |
| Tatoeba-test.eng-lit.eng.lit | 37.1 | 0.650 |
| Tatoeba-test.eng.multi | 37.0 | 0.616 |
| Tatoeba-test.eng-prg.eng.prg | 0.5 | 0.130 |
| Tatoeba-test.eng-sgs.eng.sgs | 4.1 | 0.178 |

### System Info:
- hf_name: eng-bat
- source_languages: eng
- target_languages: bat
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-bat/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'lt', 'lv', 'bat']
- src_constituents: {'eng'}
- tgt_constituents: {'lit', 'lav', 'prg_Latn', 'ltg', 'sgs'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bat/opus2m-2020-08-01.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bat/opus2m-2020-08-01.test.txt
- src_alpha3: eng
- tgt_alpha3: bat
- short_pair: en-bat
- chrF2_score: 0.616
- bleu: 37.0
- brevity_penalty: 0.956
- ref_len: 26417.0
- src_name: English
- tgt_name: Baltic languages
- train_date: 2020-08-01
- src_alpha2: en
- tgt_alpha2: bat
- prefer_old: False
- long_pair: eng-bat
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
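For the CTC speech-recognition row above (Baybars/wav2vec2-xls-r-1b-turkish), a minimal sketch; the silent placeholder array stands in for real 16 kHz audio:

```
import numpy as np
import torch
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

# assumes the repo id from the row above
processor = Wav2Vec2Processor.from_pretrained("Baybars/wav2vec2-xls-r-1b-turkish")
model = Wav2Vec2ForCTC.from_pretrained("Baybars/wav2vec2-xls-r-1b-turkish")

speech = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of silence
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
print(processor.batch_decode(ids))
```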
BeIR/sparta-msmarco-distilbert-base-v1
[ "pytorch", "distilbert", "feature-extraction", "arxiv:2009.13013", "arxiv:2104.08663", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
106
"2020-04-29T01:41:27Z"
---
language:
- en
- bg
tags:
- translation
license: apache-2.0
---

### eng-bul

* source group: English
* target group: Bulgarian
* OPUS readme: [eng-bul](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-bul/README.md)
* model: transformer
* source language(s): eng
* target language(s): bul bul_Latn
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus-2020-07-03.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bul/opus-2020-07-03.zip)
* test set translations: [opus-2020-07-03.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bul/opus-2020-07-03.test.txt)
* test set scores: [opus-2020-07-03.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bul/opus-2020-07-03.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng.bul | 50.6 | 0.680 |

### System Info:
- hf_name: eng-bul
- source_languages: eng
- target_languages: bul
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-bul/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'bg']
- src_constituents: {'eng'}
- tgt_constituents: {'bul', 'bul_Latn'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bul/opus-2020-07-03.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bul/opus-2020-07-03.test.txt
- src_alpha3: eng
- tgt_alpha3: bul
- short_pair: en-bg
- chrF2_score: 0.68
- bleu: 50.6
- brevity_penalty: 0.96
- ref_len: 69504.0
- src_name: English
- tgt_name: Bulgarian
- train_date: 2020-07-03
- src_alpha2: en
- tgt_alpha2: bg
- prefer_old: False
- long_pair: eng-bul
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
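For the feature-extraction row above (BeIR/sparta-msmarco-distilbert-base-v1), a sketch that pulls raw per-token embeddings via the generic pipeline; note this is not the full SPARTA scoring scheme described in the papers the row references:

```
from transformers import pipeline

# assumes the repo id from the row above; returns one vector per input token
extractor = pipeline("feature-extraction", model="BeIR/sparta-msmarco-distilbert-base-v1")
features = extractor("what is dense retrieval?")
print(len(features[0]), len(features[0][0]))  # tokens x hidden size
```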
Beatriz/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-08-19T00:10:48Z"
---
language:
- en
- sn
- zu
- rw
- lg
- ts
- ln
- ny
- xh
- rn
- bnt
tags:
- translation
license: apache-2.0
---

### eng-bnt

* source group: English
* target group: Bantu languages
* OPUS readme: [eng-bnt](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-bnt/README.md)
* model: transformer
* source language(s): eng
* target language(s): kin lin lug nya run sna swh toi_Latn tso umb xho zul
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bnt/opus-2020-07-26.zip)
* test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bnt/opus-2020-07-26.test.txt)
* test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bnt/opus-2020-07-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng-kin.eng.kin | 12.5 | 0.519 |
| Tatoeba-test.eng-lin.eng.lin | 1.1 | 0.277 |
| Tatoeba-test.eng-lug.eng.lug | 4.8 | 0.415 |
| Tatoeba-test.eng.multi | 12.1 | 0.449 |
| Tatoeba-test.eng-nya.eng.nya | 22.1 | 0.616 |
| Tatoeba-test.eng-run.eng.run | 13.2 | 0.492 |
| Tatoeba-test.eng-sna.eng.sna | 32.1 | 0.669 |
| Tatoeba-test.eng-swa.eng.swa | 1.7 | 0.180 |
| Tatoeba-test.eng-toi.eng.toi | 10.7 | 0.266 |
| Tatoeba-test.eng-tso.eng.tso | 26.9 | 0.631 |
| Tatoeba-test.eng-umb.eng.umb | 5.2 | 0.295 |
| Tatoeba-test.eng-xho.eng.xho | 22.6 | 0.615 |
| Tatoeba-test.eng-zul.eng.zul | 41.1 | 0.769 |

### System Info:
- hf_name: eng-bnt
- source_languages: eng
- target_languages: bnt
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-bnt/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'sn', 'zu', 'rw', 'lg', 'ts', 'ln', 'ny', 'xh', 'rn', 'bnt']
- src_constituents: {'eng'}
- tgt_constituents: {'sna', 'zul', 'kin', 'lug', 'tso', 'lin', 'nya', 'xho', 'swh', 'run', 'toi_Latn', 'umb'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bnt/opus-2020-07-26.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-bnt/opus-2020-07-26.test.txt
- src_alpha3: eng
- tgt_alpha3: bnt
- short_pair: en-bnt
- chrF2_score: 0.449
- bleu: 12.1
- brevity_penalty: 1.0
- ref_len: 9989.0
- src_name: English
- tgt_name: Bantu languages
- train_date: 2020-07-26
- src_alpha2: en
- tgt_alpha2: bnt
- prefer_old: False
- long_pair: eng-bnt
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Bee-Garbs/DialoGPT-real-cartman-small
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
"2020-04-29T01:42:27Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-ca

* source languages: en
* target languages: ca
* OPUS readme: [en-ca](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-ca/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-ca/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-ca/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-ca/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.en.ca | 47.2 | 0.665 |
Beelow/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-04-29T01:42:45Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-ceb

* source languages: en
* target languages: ceb
* OPUS readme: [en-ceb](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-ceb/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-ceb/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-ceb/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-ceb/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.en.ceb | 51.3 | 0.704 |
| Tatoeba.en.ceb | 31.3 | 0.600 |
Beelow/wav2vec2-ukrainian-model-large
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-08-19T00:10:58Z"
---
language:
- en
- gd
- ga
- br
- kw
- gv
- cy
- cel
tags:
- translation
license: apache-2.0
---

### eng-cel

* source group: English
* target group: Celtic languages
* OPUS readme: [eng-cel](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-cel/README.md)
* model: transformer
* source language(s): eng
* target language(s): bre cor cym gla gle glv
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus2m-2020-08-01.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cel/opus2m-2020-08-01.zip)
* test set translations: [opus2m-2020-08-01.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cel/opus2m-2020-08-01.test.txt)
* test set scores: [opus2m-2020-08-01.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cel/opus2m-2020-08-01.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng-bre.eng.bre | 11.5 | 0.338 |
| Tatoeba-test.eng-cor.eng.cor | 0.3 | 0.095 |
| Tatoeba-test.eng-cym.eng.cym | 31.0 | 0.549 |
| Tatoeba-test.eng-gla.eng.gla | 7.6 | 0.317 |
| Tatoeba-test.eng-gle.eng.gle | 35.9 | 0.582 |
| Tatoeba-test.eng-glv.eng.glv | 9.9 | 0.454 |
| Tatoeba-test.eng.multi | 18.0 | 0.342 |

### System Info:
- hf_name: eng-cel
- source_languages: eng
- target_languages: cel
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-cel/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'gd', 'ga', 'br', 'kw', 'gv', 'cy', 'cel']
- src_constituents: {'eng'}
- tgt_constituents: {'gla', 'gle', 'bre', 'cor', 'glv', 'cym'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cel/opus2m-2020-08-01.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cel/opus2m-2020-08-01.test.txt
- src_alpha3: eng
- tgt_alpha3: cel
- short_pair: en-cel
- chrF2_score: 0.342
- bleu: 18.0
- brevity_penalty: 0.9590000000000001
- ref_len: 45370.0
- src_name: English
- tgt_name: Celtic languages
- train_date: 2020-08-01
- src_alpha2: en
- tgt_alpha2: cel
- prefer_old: False
- long_pair: eng-cel
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Belin/T5-Terms-and-Conditions
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-08-19T00:11:01Z"
---
language:
- en
- ht
- cpf
tags:
- translation
license: apache-2.0
---

### eng-cpf

* source group: English
* target group: Creoles and pidgins, French-based
* OPUS readme: [eng-cpf](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-cpf/README.md)
* model: transformer
* source language(s): eng
* target language(s): gcf_Latn hat mfe
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cpf/opus-2020-07-26.zip)
* test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cpf/opus-2020-07-26.test.txt)
* test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cpf/opus-2020-07-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng-gcf.eng.gcf | 6.2 | 0.262 |
| Tatoeba-test.eng-hat.eng.hat | 25.7 | 0.451 |
| Tatoeba-test.eng-mfe.eng.mfe | 80.1 | 0.900 |
| Tatoeba-test.eng.multi | 15.9 | 0.354 |

### System Info:
- hf_name: eng-cpf
- source_languages: eng
- target_languages: cpf
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-cpf/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'ht', 'cpf']
- src_constituents: {'eng'}
- tgt_constituents: {'gcf_Latn', 'hat', 'mfe'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cpf/opus-2020-07-26.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-cpf/opus-2020-07-26.test.txt
- src_alpha3: eng
- tgt_alpha3: cpf
- short_pair: en-cpf
- chrF2_score: 0.354
- bleu: 15.9
- brevity_penalty: 1.0
- ref_len: 1012.0
- src_name: English
- tgt_name: Creoles and pidgins, French-based
- train_date: 2020-07-26
- src_alpha2: en
- tgt_alpha2: cpf
- prefer_old: False
- long_pair: eng-cpf
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
BenDavis71/GPT-2-Finetuning-AIRaid
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
"2020-04-29T01:43:16Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-crs

* source languages: en
* target languages: crs
* OPUS readme: [en-crs](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-crs/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-crs/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-crs/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-crs/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.en.crs | 45.2 | 0.617 |
BenGeorge/MyModel
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-cs

* source languages: en
* target languages: cs
* OPUS readme: [en-cs](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-cs/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-cs/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-cs/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-cs/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newssyscomb2009.en.cs | 22.8 | 0.507 |
| news-test2008.en.cs | 20.7 | 0.485 |
| newstest2009.en.cs | 21.8 | 0.500 |
| newstest2010.en.cs | 22.1 | 0.505 |
| newstest2011.en.cs | 23.2 | 0.507 |
| newstest2012.en.cs | 20.8 | 0.482 |
| newstest2013.en.cs | 24.7 | 0.514 |
| newstest2015-encs.en.cs | 24.9 | 0.527 |
| newstest2016-encs.en.cs | 26.7 | 0.540 |
| newstest2017-encs.en.cs | 22.7 | 0.503 |
| newstest2018-encs.en.cs | 22.9 | 0.504 |
| newstest2019-encs.en.cs | 24.9 | 0.518 |
| Tatoeba.en.cs | 46.1 | 0.647 |
Benicio/t5-small-finetuned-en-to-ro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-04-29T01:44:06Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-da

* source languages: en
* target languages: da
* OPUS readme: [en-da](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-da/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-da/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-da/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-da/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.en.da | 60.4 | 0.745 |
Beri/legal-qa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
"2020-08-19T00:11:19Z"
---
language:
- en
- ta
- kn
- ml
- te
- dra
tags:
- translation
license: apache-2.0
---

### eng-dra

* source group: English
* target group: Dravidian languages
* OPUS readme: [eng-dra](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-dra/README.md)
* model: transformer
* source language(s): eng
* target language(s): kan mal tam tel
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-dra/opus-2020-07-26.zip)
* test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-dra/opus-2020-07-26.test.txt)
* test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-dra/opus-2020-07-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng-kan.eng.kan | 4.7 | 0.348 |
| Tatoeba-test.eng-mal.eng.mal | 13.1 | 0.515 |
| Tatoeba-test.eng.multi | 10.7 | 0.463 |
| Tatoeba-test.eng-tam.eng.tam | 9.0 | 0.444 |
| Tatoeba-test.eng-tel.eng.tel | 7.1 | 0.363 |

### System Info:
- hf_name: eng-dra
- source_languages: eng
- target_languages: dra
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-dra/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'ta', 'kn', 'ml', 'te', 'dra']
- src_constituents: {'eng'}
- tgt_constituents: {'tam', 'kan', 'mal', 'tel'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-dra/opus-2020-07-26.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-dra/opus-2020-07-26.test.txt
- src_alpha3: eng
- tgt_alpha3: dra
- short_pair: en-dra
- chrF2_score: 0.46299999999999997
- bleu: 10.7
- brevity_penalty: 1.0
- ref_len: 7928.0
- src_name: English
- tgt_name: Dravidian languages
- train_date: 2020-07-26
- src_alpha2: en
- tgt_alpha2: dra
- prefer_old: False
- long_pair: eng-dra
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Bharathdamu/wav2vec2-large-xls-r-300m-hindi3-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-08-19T00:11:28Z"
---
language:
- en
- euq
tags:
- translation
license: apache-2.0
---

### eng-euq

* source group: English
* target group: Basque (family)
* OPUS readme: [eng-euq](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-euq/README.md)
* model: transformer
* source language(s): eng
* target language(s): eus
* model: transformer
* pre-processing: normalization + SentencePiece (spm12k,spm12k)
* download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-euq/opus-2020-07-26.zip)
* test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-euq/opus-2020-07-26.test.txt)
* test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-euq/opus-2020-07-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng.eus | 27.9 | 0.555 |
| Tatoeba-test.eng-eus.eng.eus | 27.9 | 0.555 |

### System Info:
- hf_name: eng-euq
- source_languages: eng
- target_languages: euq
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-euq/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'euq']
- src_constituents: {'eng'}
- tgt_constituents: {'eus'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm12k,spm12k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-euq/opus-2020-07-26.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-euq/opus-2020-07-26.test.txt
- src_alpha3: eng
- tgt_alpha3: euq
- short_pair: en-euq
- chrF2_score: 0.555
- bleu: 27.9
- brevity_penalty: 0.917
- ref_len: 7080.0
- src_name: English
- tgt_name: Basque (family)
- train_date: 2020-07-26
- src_alpha2: en
- tgt_alpha2: euq
- prefer_old: False
- long_pair: eng-euq
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
Bhumika/roberta-base-finetuned-sst2
[ "pytorch", "tensorboard", "roberta", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "model-index" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-fj

* source languages: en
* target languages: fj
* OPUS readme: [en-fj](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-fj/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-fj/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-fj/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-fj/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.en.fj | 34.0 | 0.561 |
| Tatoeba.en.fj | 62.5 | 0.781 |
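For the sentiment row above (Bhumika/roberta-base-finetuned-sst2, fine-tuned on GLUE SST-2, so English sentiment), a minimal text-classification sketch:

```
from transformers import pipeline

# assumes the repo id from the row above
clf = pipeline("text-classification", model="Bhumika/roberta-base-finetuned-sst2")
print(clf("A remarkably assured debut."))
```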
Bhuvana/t5-base-spellchecker
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
93
"2020-04-29T01:47:21Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-fr

* source languages: en
* target languages: fr
* OPUS readme: [en-fr](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-fr/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-02-26.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-fr/opus-2020-02-26.zip)
* test set translations: [opus-2020-02-26.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-fr/opus-2020-02-26.test.txt)
* test set scores: [opus-2020-02-26.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-fr/opus-2020-02-26.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newsdiscussdev2015-enfr.en.fr | 33.8 | 0.602 |
| newsdiscusstest2015-enfr.en.fr | 40.0 | 0.643 |
| newssyscomb2009.en.fr | 29.8 | 0.584 |
| news-test2008.en.fr | 27.5 | 0.554 |
| newstest2009.en.fr | 29.4 | 0.577 |
| newstest2010.en.fr | 32.7 | 0.596 |
| newstest2011.en.fr | 34.3 | 0.611 |
| newstest2012.en.fr | 31.8 | 0.592 |
| newstest2013.en.fr | 33.2 | 0.589 |
| Tatoeba.en.fr | 50.5 | 0.672 |
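The T5 configs in this dump (e.g. the Bhuvana/t5-base-spellchecker entry above) carry `task_specific_params` whose prefix strings are prepended to the input text to select a task. A minimal sketch of that mechanism, using the stock `t5-small` checkpoint as a stand-in since the spellchecker entry repurposes those defaults:

```
from transformers import pipeline

# the "translate English to German: " prefix matches the config's
# translation_en_to_de task_specific_params
translator = pipeline("text2text-generation", model="t5-small")
print(translator("translate English to German: The house is wonderful."))
```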
BigSalmon/BertaMyWorda
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
"2020-08-19T00:11:52Z"
---
language:
- en
- da
- nb
- sv
- is
- nn
- fo
- gmq
tags:
- translation
license: apache-2.0
---

### eng-gmq

* source group: English
* target group: North Germanic languages
* OPUS readme: [eng-gmq](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-gmq/README.md)
* model: transformer
* source language(s): eng
* target language(s): dan fao isl nno nob nob_Hebr non_Latn swe
* model: transformer
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus2m-2020-08-01.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opus2m-2020-08-01.zip)
* test set translations: [opus2m-2020-08-01.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opus2m-2020-08-01.test.txt)
* test set scores: [opus2m-2020-08-01.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opus2m-2020-08-01.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng-dan.eng.dan | 57.7 | 0.724 |
| Tatoeba-test.eng-fao.eng.fao | 9.2 | 0.322 |
| Tatoeba-test.eng-isl.eng.isl | 23.8 | 0.506 |
| Tatoeba-test.eng.multi | 52.8 | 0.688 |
| Tatoeba-test.eng-non.eng.non | 0.7 | 0.196 |
| Tatoeba-test.eng-nor.eng.nor | 50.3 | 0.678 |
| Tatoeba-test.eng-swe.eng.swe | 57.8 | 0.717 |

### System Info:
- hf_name: eng-gmq
- source_languages: eng
- target_languages: gmq
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-gmq/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'da', 'nb', 'sv', 'is', 'nn', 'fo', 'gmq']
- src_constituents: {'eng'}
- tgt_constituents: {'dan', 'nob', 'nob_Hebr', 'swe', 'isl', 'nno', 'non_Latn', 'fao'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opus2m-2020-08-01.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-gmq/opus2m-2020-08-01.test.txt
- src_alpha3: eng
- tgt_alpha3: gmq
- short_pair: en-gmq
- chrF2_score: 0.688
- bleu: 52.8
- brevity_penalty: 0.973
- ref_len: 71881.0
- src_name: English
- tgt_name: North Germanic languages
- train_date: 2020-08-01
- src_alpha2: en
- tgt_alpha2: gmq
- prefer_old: False
- long_pair: eng-gmq
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
BigSalmon/BlankSlots
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
4
"2020-08-19T00:12:01Z"
---
language:
- en
- el
- grk
tags:
- translation
license: apache-2.0
---

### eng-grk

* source group: English
* target group: Greek languages
* OPUS readme: [eng-grk](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-grk/README.md)
* model: transformer
* source language(s): eng
* target language(s): ell grc_Grek
* model: transformer
* pre-processing: normalization + SentencePiece (spm12k,spm12k)
* a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus2m-2020-08-01.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-grk/opus2m-2020-08-01.zip)
* test set translations: [opus2m-2020-08-01.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-grk/opus2m-2020-08-01.test.txt)
* test set scores: [opus2m-2020-08-01.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-grk/opus2m-2020-08-01.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng-ell.eng.ell | 53.8 | 0.723 |
| Tatoeba-test.eng-grc.eng.grc | 0.1 | 0.102 |
| Tatoeba-test.eng.multi | 45.6 | 0.677 |

### System Info:
- hf_name: eng-grk
- source_languages: eng
- target_languages: grk
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-grk/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'el', 'grk']
- src_constituents: {'eng'}
- tgt_constituents: {'grc_Grek', 'ell'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm12k,spm12k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-grk/opus2m-2020-08-01.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-grk/opus2m-2020-08-01.test.txt
- src_alpha3: eng
- tgt_alpha3: grk
- short_pair: en-grk
- chrF2_score: 0.677
- bleu: 45.6
- brevity_penalty: 1.0
- ref_len: 59951.0
- src_name: English
- tgt_name: Greek languages
- train_date: 2020-08-01
- src_alpha2: en
- tgt_alpha2: grk
- prefer_old: False
- long_pair: eng-grk
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
BigSalmon/GPT2HardArticleEasyArticle
[ "pytorch", "jax", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-hu

* source languages: en
* target languages: hu
* OPUS readme: [en-hu](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-hu/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-hu/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-hu/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-hu/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.en.hu | 40.1 | 0.628 |
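For a bilingual pair like this one, no target-language token is needed. A minimal sketch through the high-level `pipeline` API, assuming the usual Hub name `Helsinki-NLP/opus-mt-en-hu` for this checkpoint:

```
from transformers import pipeline

# "Helsinki-NLP/opus-mt-en-hu" is the assumed Hub id for this en-hu model.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-hu")
print(translator("The weather is nice today.")[0]["translation_text"])
```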
BigSalmon/GPT2HardandEasy
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
"2020-08-19T00:12:06Z"
---
language:
- en
- hy
tags:
- translation
license: apache-2.0
---

### eng-hye

* source group: English
* target group: Armenian
* OPUS readme: [eng-hye](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-hye/README.md)
* model: transformer-align
* source language(s): eng
* target language(s): hye
* pre-processing: normalization + SentencePiece (spm4k,spm4k)
* download original weights: [opus-2020-06-16.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-hye/opus-2020-06-16.zip)
* test set translations: [opus-2020-06-16.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-hye/opus-2020-06-16.test.txt)
* test set scores: [opus-2020-06-16.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-hye/opus-2020-06-16.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.eng.hye | 16.6 | 0.404 |

### System Info:
- hf_name: eng-hye
- source_languages: eng
- target_languages: hye
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-hye/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'hy']
- src_constituents: {'eng'}
- tgt_constituents: {'hye', 'hye_Latn'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm4k,spm4k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-hye/opus-2020-06-16.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-hye/opus-2020-06-16.test.txt
- src_alpha3: eng
- tgt_alpha3: hye
- short_pair: en-hy
- chrF2_score: 0.404
- bleu: 16.6
- brevity_penalty: 1.0
- ref_len: 5115.0
- src_name: English
- tgt_name: Armenian
- train_date: 2020-06-16
- src_alpha2: en
- tgt_alpha2: hy
- prefer_old: False
- long_pair: eng-hye
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
BigSalmon/GPTHeHe
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-id

* source languages: en
* target languages: id
* OPUS readme: [en-id](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-id/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2019-12-18.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-id/opus-2019-12-18.zip)
* test set translations: [opus-2019-12-18.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-id/opus-2019-12-18.test.txt)
* test set scores: [opus-2019-12-18.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-id/opus-2019-12-18.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba.en.id | 38.3 | 0.636 |
BigSalmon/GPTIntro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
"2020-04-29T01:51:25Z"
---
tags:
- translation
license: apache-2.0
---

### opus-mt-en-ig

* source languages: en
* target languages: ig
* OPUS readme: [en-ig](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-ig/README.md)
* dataset: opus
* model: transformer-align
* pre-processing: normalization + SentencePiece
* download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-ig/opus-2020-01-08.zip)
* test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-ig/opus-2020-01-08.test.txt)
* test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-ig/opus-2020-01-08.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| JW300.en.ig | 39.5 | 0.546 |
| Tatoeba.en.ig | 3.8 | 0.297 |
BigSalmon/GPTNeo350MInformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
language:
- en
- bn
- or
- gu
- mr
- ur
- hi
- ps
- os
- as
- si
- iir
tags:
- translation
license: apache-2.0
---

### eng-iir

* source group: English
* target group: Indo-Iranian languages
* OPUS readme: [eng-iir](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-iir/README.md)
* model: transformer
* source language(s): eng
* target language(s): asm awa ben bho gom guj hif_Latn hin jdt_Cyrl kur_Arab kur_Latn mai mar npi ori oss pan_Guru pes pes_Latn pes_Thaa pnb pus rom san_Deva sin snd_Arab tgk_Cyrl tly_Latn urd zza
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* a sentence-initial language token is required in the form of `>>id<<` (id = valid target language ID)
* download original weights: [opus2m-2020-08-01.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-iir/opus2m-2020-08-01.zip)
* test set translations: [opus2m-2020-08-01.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-iir/opus2m-2020-08-01.test.txt)
* test set scores: [opus2m-2020-08-01.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-iir/opus2m-2020-08-01.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| newsdev2014-enghin.eng.hin | 6.7 | 0.326 |
| newsdev2019-engu-engguj.eng.guj | 6.0 | 0.283 |
| newstest2014-hien-enghin.eng.hin | 10.4 | 0.353 |
| newstest2019-engu-engguj.eng.guj | 6.6 | 0.282 |
| Tatoeba-test.eng-asm.eng.asm | 2.7 | 0.249 |
| Tatoeba-test.eng-awa.eng.awa | 0.4 | 0.122 |
| Tatoeba-test.eng-ben.eng.ben | 15.3 | 0.459 |
| Tatoeba-test.eng-bho.eng.bho | 3.7 | 0.161 |
| Tatoeba-test.eng-fas.eng.fas | 3.4 | 0.227 |
| Tatoeba-test.eng-guj.eng.guj | 18.5 | 0.365 |
| Tatoeba-test.eng-hif.eng.hif | 1.0 | 0.064 |
| Tatoeba-test.eng-hin.eng.hin | 17.0 | 0.461 |
| Tatoeba-test.eng-jdt.eng.jdt | 3.9 | 0.122 |
| Tatoeba-test.eng-kok.eng.kok | 5.5 | 0.059 |
| Tatoeba-test.eng-kur.eng.kur | 4.0 | 0.125 |
| Tatoeba-test.eng-lah.eng.lah | 0.3 | 0.008 |
| Tatoeba-test.eng-mai.eng.mai | 9.3 | 0.445 |
| Tatoeba-test.eng-mar.eng.mar | 20.7 | 0.473 |
| Tatoeba-test.eng.multi | 13.7 | 0.392 |
| Tatoeba-test.eng-nep.eng.nep | 0.6 | 0.060 |
| Tatoeba-test.eng-ori.eng.ori | 2.4 | 0.193 |
| Tatoeba-test.eng-oss.eng.oss | 2.1 | 0.174 |
| Tatoeba-test.eng-pan.eng.pan | 9.7 | 0.355 |
| Tatoeba-test.eng-pus.eng.pus | 1.0 | 0.126 |
| Tatoeba-test.eng-rom.eng.rom | 1.3 | 0.230 |
| Tatoeba-test.eng-san.eng.san | 1.3 | 0.101 |
| Tatoeba-test.eng-sin.eng.sin | 11.7 | 0.384 |
| Tatoeba-test.eng-snd.eng.snd | 2.8 | 0.180 |
| Tatoeba-test.eng-tgk.eng.tgk | 8.1 | 0.353 |
| Tatoeba-test.eng-tly.eng.tly | 0.5 | 0.015 |
| Tatoeba-test.eng-urd.eng.urd | 12.3 | 0.409 |
| Tatoeba-test.eng-zza.eng.zza | 0.5 | 0.025 |

### System Info:
- hf_name: eng-iir
- source_languages: eng
- target_languages: iir
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-iir/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['en', 'bn', 'or', 'gu', 'mr', 'ur', 'hi', 'ps', 'os', 'as', 'si', 'iir']
- src_constituents: {'eng'}
- tgt_constituents: {'pnb', 'gom', 'ben', 'hif_Latn', 'ori', 'guj', 'pan_Guru', 'snd_Arab', 'npi', 'mar', 'urd', 'pes', 'bho', 'kur_Arab', 'tgk_Cyrl', 'hin', 'kur_Latn', 'pes_Thaa', 'pus', 'san_Deva', 'oss', 'tly_Latn', 'jdt_Cyrl', 'asm', 'zza', 'rom', 'mai', 'pes_Latn', 'awa', 'sin'}
- src_multilingual: False
- tgt_multilingual: True
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-iir/opus2m-2020-08-01.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-iir/opus2m-2020-08-01.test.txt
- src_alpha3: eng
- tgt_alpha3: iir
- short_pair: en-iir
- chrF2_score: 0.392
- bleu: 13.7
- brevity_penalty: 1.0
- ref_len: 63351.0
- src_name: English
- tgt_name: Indo-Iranian languages
- train_date: 2020-08-01
- src_alpha2: en
- tgt_alpha2: iir
- prefer_old: False
- long_pair: eng-iir
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
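With this many targets, the valid `>>id<<` tokens are easier to read off the tokenizer's vocabulary than to memorise. A hedged sketch, again assuming the usual Hub naming (`Helsinki-NLP/opus-mt-en-iir`) for the short pair listed above:

```
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-iir"  # assumed Hub id for short_pair en-iir
tokenizer = MarianTokenizer.from_pretrained(model_name)

# Multilingual Marian checkpoints store the target selectors as >>xxx<<
# vocabulary entries, so they can be enumerated directly.
lang_tokens = sorted(t for t in tokenizer.get_vocab() if t.startswith(">>") and t.endswith("<<"))
print(lang_tokens)  # expect entries such as '>>hin<<', '>>ben<<', '>>urd<<'

# The chosen token is simply prefixed to the source sentence before encoding.
model = MarianMTModel.from_pretrained(model_name)
batch = tokenizer([">>hin<< Good morning."], return_tensors="pt", padding=True)
print(tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True))
```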