| Column | Type | Range |
| ------------- | -------------------- | --------------- |
| modelId | string | length 5-122 |
| author | string | length 2-42 |
| last_modified | unknown | n/a |
| downloads | int64 | 0-738M |
| likes | int64 | 0-11k |
| library_name | string (categorical) | 245 classes |
| tags | sequence | length 1-4.05k |
| pipeline_tag | string (categorical) | 48 classes |
| createdAt | unknown | n/a |
| card | string | length 1-901k |
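Each record below follows this schema; the `card` field holds the raw model-card text, or `Entry not found` when the repository has no card. As a reference, here is a minimal sketch of loading and filtering such a dump with the `datasets` library (the dataset repository name is a placeholder, not the actual source of this dump):

```python
# Minimal sketch: load a model-metadata dump with the schema above and keep
# only rows that carry a real model card. The repo name below is a placeholder.
from datasets import load_dataset

ds = load_dataset("your-org/hub-model-metadata", split="train")  # placeholder repo

usable = ds.filter(lambda row: row["card"] != "Entry not found")
for row in usable.select(range(5)):
    print(row["modelId"], row["library_name"], row["pipeline_tag"])
```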
itay-nakash/model_e4ad58a464_sweep_fresh-jazz-799
itay-nakash
"2024-06-22T19:32:12Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T19:32:12Z"
Entry not found
itay-nakash/model_e4ad58a464_sweep_dazzling-grass-800
itay-nakash
"2024-06-22T19:34:05Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T19:34:05Z"
Entry not found
Rohank05/output
Rohank05
"2024-06-22T19:34:09Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T19:34:09Z"
Entry not found
PatrickFemia/xgboost-new-pay-prediction
PatrickFemia
"2024-06-22T19:41:03Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T19:41:03Z"
Entry not found
karthikmit/openai-whisper-medium-LORA-EN-NQ-v1
karthikmit
"2024-06-22T19:45:07Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T19:44:58Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
blockblockblock/TinyLlama-1.1B-Chat-v1.0-bpw2.25-exl2
blockblockblock
"2024-07-01T01:44:58Z"
0
0
transformers
[ "transformers", "llama", "text-generation", "conversational", "en", "dataset:cerebras/SlimPajama-627B", "dataset:bigcode/starcoderdata", "dataset:HuggingFaceH4/ultrachat_200k", "dataset:HuggingFaceH4/ultrafeedback_binarized", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "exl2", "region:us" ]
text-generation
"2024-06-22T19:47:56Z"
--- license: apache-2.0 datasets: - cerebras/SlimPajama-627B - bigcode/starcoderdata - HuggingFaceH4/ultrachat_200k - HuggingFaceH4/ultrafeedback_binarized language: - en widget: - example_title: Fibonacci (Python) messages: - role: system content: You are a chatbot who can help code! - role: user content: Write me a function to calculate the first 10 digits of the fibonacci sequence in Python and print it out to the CLI. --- <div align="center"> # TinyLlama-1.1B </div> https://github.com/jzhang38/TinyLlama The TinyLlama project aims to **pretrain** a **1.1B Llama model on 3 trillion tokens**. With some proper optimization, we can achieve this within a span of "just" 90 days using 16 A100-40G GPUs πŸš€πŸš€. Training started on 2023-09-01. We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama can be used as a plug-and-play replacement in many open-source projects built upon Llama. TinyLlama is also compact, with only 1.1B parameters, which lets it serve applications that demand a restricted computation and memory footprint. #### This Model This is the chat model finetuned on top of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T). **We follow [HF's Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)'s training recipe.** The model was initially fine-tuned on a variant of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. We then further aligned the model with [πŸ€— TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset, which contains 64k prompts and model completions ranked by GPT-4. #### How to use You will need transformers>=4.34. Check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information. ```python # Install transformers from source - only needed for versions <= v4.34 # pip install git+https://github.com/huggingface/transformers.git # pip install accelerate import torch from transformers import pipeline pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto") # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) # <|system|> # You are a friendly chatbot who always responds in the style of a pirate.</s> # <|user|> # How many helicopters can a human eat in one sitting?</s> # <|assistant|> # ... ```
Nafis33/Nafis
Nafis33
"2024-06-22T19:56:02Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T19:56:01Z"
Entry not found
blockblockblock/TinyLlama-1.1B-Chat-v1.0-bpw2.5-exl2
blockblockblock
"2024-06-22T19:57:40Z"
0
0
transformers
[ "transformers", "llama", "text-generation", "conversational", "en", "dataset:cerebras/SlimPajama-627B", "dataset:bigcode/starcoderdata", "dataset:HuggingFaceH4/ultrachat_200k", "dataset:HuggingFaceH4/ultrafeedback_binarized", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "exl2", "region:us" ]
text-generation
"2024-06-22T19:57:21Z"
--- license: apache-2.0 datasets: - cerebras/SlimPajama-627B - bigcode/starcoderdata - HuggingFaceH4/ultrachat_200k - HuggingFaceH4/ultrafeedback_binarized language: - en widget: - example_title: Fibonacci (Python) messages: - role: system content: You are a chatbot who can help code! - role: user content: Write me a function to calculate the first 10 digits of the fibonacci sequence in Python and print it out to the CLI. --- <div align="center"> # TinyLlama-1.1B </div> https://github.com/jzhang38/TinyLlama The TinyLlama project aims to **pretrain** a **1.1B Llama model on 3 trillion tokens**. With some proper optimization, we can achieve this within a span of "just" 90 days using 16 A100-40G GPUs πŸš€πŸš€. Training started on 2023-09-01. We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama can be used as a plug-and-play replacement in many open-source projects built upon Llama. TinyLlama is also compact, with only 1.1B parameters, which lets it serve applications that demand a restricted computation and memory footprint. #### This Model This is the chat model finetuned on top of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T). **We follow [HF's Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)'s training recipe.** The model was initially fine-tuned on a variant of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. We then further aligned the model with [πŸ€— TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset, which contains 64k prompts and model completions ranked by GPT-4. #### How to use You will need transformers>=4.34. Check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information. ```python # Install transformers from source - only needed for versions <= v4.34 # pip install git+https://github.com/huggingface/transformers.git # pip install accelerate import torch from transformers import pipeline pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto") # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) # <|system|> # You are a friendly chatbot who always responds in the style of a pirate.</s> # <|user|> # How many helicopters can a human eat in one sitting?</s> # <|assistant|> # ... ```
LarryAIDraw/eznegativexl
LarryAIDraw
"2024-06-22T20:01:07Z"
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
"2024-06-22T19:59:15Z"
--- license: creativeml-openrail-m --- https://civitai.com/models/368864/eznegative-xlponysd?modelVersionId=412173
Xrunner/dpo
Xrunner
"2024-06-22T19:59:53Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T19:59:24Z"
Entry not found
LarryAIDraw/DeepNegative_xl_v1
LarryAIDraw
"2024-06-22T20:00:20Z"
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
"2024-06-22T19:59:37Z"
--- license: creativeml-openrail-m --- https://civitai.com/models/407448/deep-negative-xl?modelVersionId=454217
LarryAIDraw/Kafka-000008
LarryAIDraw
"2024-06-22T20:16:23Z"
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
"2024-06-22T20:14:18Z"
--- license: creativeml-openrail-m --- https://civitai.com/models/529181/ponyv6-xl-kafka-from-honkai-star-rail-lora
Kokushibou/kok
Kokushibou
"2024-06-22T20:14:30Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T20:14:30Z"
Entry not found
asap-blocky/Niue-GPT
asap-blocky
"2024-06-23T01:11:17Z"
0
0
null
[ "license:apache-2.0", "region:us" ]
null
"2024-06-22T20:14:55Z"
--- license: apache-2.0 ---
blockblockblock/TinyLlama_v1.1-bpw2.25-exl2
blockblockblock
"2024-06-22T20:48:35Z"
0
0
transformers
[ "transformers", "llama", "text-generation", "en", "dataset:cerebras/SlimPajama-627B", "arxiv:2401.02385", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "exl2", "region:us" ]
text-generation
"2024-06-22T20:16:32Z"
--- license: apache-2.0 datasets: - cerebras/SlimPajama-627B language: - en --- # TinyLlama-1.1B-v1.1 - **Codebase:** [github.com/jzhang38/TinyLlama](https://github.com/jzhang38/TinyLlama) - **Technical Report:** [arxiv.org/pdf/2401.02385](https://arxiv.org/pdf/2401.02385) <div align="center"> <img src="https://huggingface.co/PY007/TinyLlama-1.1B-intermediate-step-240k-503b/resolve/main/TinyLlama_logo.png" width="300"/> </div> We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama can be used as a plug-and-play replacement in many open-source projects built upon Llama. TinyLlama is also compact, with only 1.1B parameters, which lets it serve applications that demand a restricted computation and memory footprint. ## Overview In this project, rather than only training a single TinyLlama model, we first train TinyLlama on a corpus of 1.5 trillion tokens to obtain foundational language capabilities. Subsequently, we take this model and turn it into three different models by continual pretraining with three distinct data-sampling strategies. For a visual representation of this process, please refer to the figure below. ![Overview](overview.png) ## Pretraining Due to these issues ([bug1](https://whimsical-aphid-86d.notion.site/Release-of-TinyLlama-1-5T-Checkpoints-Postponed-01b266998c1c47f78f5ae1520196d194?pvs=4), [bug2](https://whimsical-aphid-86d.notion.site/2023-12-18-Updates-from-TinyLlama-Team-7d30c01fff794da28ccc952f327c8d4f)), we retrained TinyLlama to provide a better model. We trained the model on 2T tokens and divided pretraining into three stages: 1) basic pretraining, 2) continual pretraining on specific domains, and 3) cooldown. #### Basic pretraining In this initial phase, we trained the model on SlimPajama only to develop its commonsense reasoning capabilities. The model saw 1.5T tokens during this basic pretraining period. Since we used a cluster with 4 A100-40G GPUs per node and only shard model weights within a node, the batch size was limited to approximately 1.8M tokens. #### Continual pretraining with specific domain We incorporated three kinds of corpora during this stage: SlimPajama (the same as in the first phase), Math&Code (StarCoder and Proof Pile), and Chinese (SkyPile). This approach allowed us to develop three variant models with specialized capabilities. During the first ~6B tokens of this stage, we linearly increased the sampling proportion of the domain-specific corpora (excluding SlimPajama, which remained unchanged from stage 1). This sampling warmup was designed to gradually shift the distribution of the pretraining data, ensuring a more stable training process. After this warmup, we continued pretraining with a stable sampling strategy until reaching ~1.85T tokens. #### Cooldown Implementing a cooldown phase has become a crucial technique for achieving better model convergence at the end of pretraining. However, since we had already used a cosine learning-rate schedule from the beginning, it is difficult to lower the learning rate further for cooldown the way MiniCPM or DeepSeek do. Therefore, we cool down by adjusting the batch size instead: we increase it from 1.8M to 7.2M tokens while keeping the original cosine learning-rate schedule. #### TinyLlama model family Following this extensive pretraining process, we are now releasing three specialized versions of our model: 1. **TinyLlama_v1.1**: The standard version, for general-purpose use. 2. **TinyLlama_v1.1_Math&Code**: Equipped with better math and code abilities. 3. **TinyLlama_v1.1_Chinese**: Better understanding of Chinese. ## Data Here we list the data distribution in each stage: ### TinyLlama_v1.1 | Corpus | Basic pretraining | Continual pretraining with specific domain | Cooldown | | ------------- | ----------------- | ------------------------------------------ | -------- | | Slimpajama | 100.0 | 100.0 | 100.0 | ### TinyLlama_v1.1_math_code | Corpus | Basic pretraining | Continual pretraining with specific domain | Cooldown | | ------------- | ----------------- | ------------------------------------------ | -------- | | Slimpajama | 100.0 | 75.0 | 75.0 | | starcoder | - | 15.0 | 15.0 | | proof_pile | - | 10.0 | 10.0 | ### TinyLlama_v1.1_chinese | Corpus | Basic pretraining | Continual pretraining with specific domain | Cooldown | | ------------- | ----------------- | ------------------------------------------ | -------- | | Slimpajama | 100.0 | 50.0 | 50.0 | | skypile | - | 50.0 | 50.0 | ### How to use You will need transformers>=4.31. Check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information. ```python from transformers import AutoTokenizer import transformers import torch model = "TinyLlama/TinyLlama_v1.1" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) sequences = pipeline( 'The TinyLlama project aims to pretrain a 1.1B Llama model on 3 trillion tokens. With some proper optimization, we can achieve this within a span of "just" 90 days using 16 A100-40G GPUs πŸš€πŸš€. The training has started on 2023-09-01.', do_sample=True, top_k=10, num_return_sequences=1, repetition_penalty=1.5, eos_token_id=tokenizer.eos_token_id, max_length=500, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ### Eval | Model | Pretrain Tokens | HellaSwag | Obqa | WinoGrande | ARC_c | ARC_e | boolq | piqa | avg | | ----------------------------------------- | --------------- | --------- | --------- | ---------- | --------- | --------- | ----- | --------- | --------- | | Pythia-1.0B | 300B | 47.16 | 31.40 | 53.43 | 27.05 | 48.99 | 60.83 | 69.21 | 48.30 | | TinyLlama-1.1B-intermediate-step-1431k-3T | 3T | 59.20 | 36.00 | 59.12 | 30.12 | 55.25 | 57.83 | 73.29 | 52.99 | | TinyLlama-1.1B-v1.1 | 2T | **61.47** | **36.80** | 59.43 | 32.68 | **55.47** | 55.99 | **73.56** | 53.63 | | TinyLlama-1.1B-v1_math_code | 2T | 60.80 | 36.40 | **60.22** | **33.87** | 55.20 | 57.09 | 72.69 | **53.75** | | TinyLlama-1.1B-v1.1_chinese | 2T | 58.23 | 35.20 | 59.27 | 31.40 | 55.35 | **61.41** | 73.01 | 53.41 |
ostoveland/bgetest
ostoveland
"2024-06-22T20:19:47Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T20:19:47Z"
Entry not found
Sinensis/DarkForest-20B-v3.0-bpw3.5-h6-exl2
Sinensis
"2024-06-22T20:39:45Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "merge", "not-for-all-audiences", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "exl2", "region:us" ]
text-generation
"2024-06-22T20:30:41Z"
--- license: other tags: - merge - not-for-all-audiences license_name: microsoft-research-license --- [exllamav2](https://github.com/turboderp/exllamav2) quant of [TeeZee/DarkForest-20B-v3.0](https://huggingface.co/TeeZee/DarkForest-20B-v3.0) using default calibration. --- # DarkForest 20B v3.0 ![image/png](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/resolve/main/DarkForest-20B-v3.0.jpg) ## Model Details - To create this model, a five-step merge procedure was used. - The resulting model has approximately 20 billion parameters. - Details of the merge steps are in the following files: - [darkforest_v3_step1.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step1.yml) - [darkforest_v3_step2.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step2.yml) - [darkforest_v3_step3.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step3.yml) - [darkforest_v3_step4.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step4.yml) - [darkforest_v3_step5.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step5.yml) ## Models used - custom model, based on athirdpath/Orca-2-13b-Alpaca-Uncensored and KoboldAI/LLaMA2-13B-Erebus-v3 - BigMaid-20B-v2.0 - athirdpath/Harmonia-20B - athirdpath/Iambe-RP-v3-20b ## Models removed - jebcarter_psyonic-cetacean-20B ## Merge method - All merges were done in float32 precision; where applicable, the breadcrumbs_ties merge method was used. **Warning: This model can produce NSFW content!** ## Results - Main difference from v2.x: the model follows character cards and the user profile much better. - Produces SFW and NSFW content without issues and switches context seamlessly. - Good at following instructions. - Good at tracking multiple characters in one scene. - Very creative; the scenarios it produces are mature and complicated, and the model doesn't shy away from writing about PTSD, mental issues, or complicated relationships. - NSFW output is more creative and surprising than typical limaRP output. - Definitely for mature audiences, not only because of the vivid NSFW content but also because of the overall maturity of the stories it produces. - This is NOT Harry Potter level storytelling. All comments are greatly appreciated. Download, test, and if you appreciate my work, consider buying me my fuel: <a href="https://www.buymeacoffee.com/TeeZee" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>
joaopaulopresa/mistral-7b-v0.3-orca-pt
joaopaulopresa
"2024-06-22T20:32:00Z"
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "mistral", "trl", "en", "base_model:unsloth/mistral-7b-v0.3", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2024-06-22T20:31:53Z"
--- base_model: unsloth/mistral-7b-v0.3 language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - mistral - trl --- # Uploaded model - **Developed by:** joaopaulopresa - **License:** apache-2.0 - **Finetuned from model:** unsloth/mistral-7b-v0.3 This Mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
Donyamin/Zaman
Donyamin
"2024-06-22T20:32:04Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T20:32:04Z"
Entry not found
Casper0508/MSc_llama2_finetuned_model_secondData5
Casper0508
"2024-06-22T20:35:06Z"
0
0
peft
[ "peft", "tensorboard", "safetensors", "generated_from_trainer", "base_model:meta-llama/Llama-2-7b-chat-hf", "license:llama2", "region:us" ]
null
"2024-06-22T20:34:59Z"
--- license: llama2 base_model: meta-llama/Llama-2-7b-chat-hf tags: - generated_from_trainer model-index: - name: MSc_llama2_finetuned_model_secondData5 results: [] library_name: peft --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # MSc_llama2_finetuned_model_secondData5 This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7187 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - _load_in_8bit: False - _load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 - load_in_4bit: True - load_in_8bit: False ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.03 - training_steps: 250 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.9919 | 1.36 | 10 | 3.6771 | | 3.369 | 2.71 | 20 | 2.9923 | | 2.6302 | 4.07 | 30 | 2.2344 | | 1.9467 | 5.42 | 40 | 1.7496 | | 1.5893 | 6.78 | 50 | 1.5028 | | 1.2919 | 8.14 | 60 | 1.1706 | | 0.9447 | 9.49 | 70 | 0.8988 | | 0.8096 | 10.85 | 80 | 0.8443 | | 0.745 | 12.2 | 90 | 0.8025 | | 0.6904 | 13.56 | 100 | 0.7733 | | 0.6546 | 14.92 | 110 | 0.7539 | | 0.6267 | 16.27 | 120 | 0.7387 | | 0.5954 | 17.63 | 130 | 0.7316 | | 0.5799 | 18.98 | 140 | 0.7256 | | 0.5596 | 20.34 | 150 | 0.7228 | | 0.5432 | 21.69 | 160 | 0.7215 | | 0.5389 | 23.05 | 170 | 0.7176 | | 0.5234 | 24.41 | 180 | 0.7175 | | 0.518 | 25.76 | 190 | 0.7189 | | 0.5122 | 27.12 | 200 | 0.7177 | | 0.5036 | 28.47 | 210 | 0.7185 | | 0.5049 | 29.83 | 220 | 0.7191 | | 0.5041 | 31.19 | 230 | 0.7195 | | 0.5028 | 32.54 | 240 | 0.7188 | | 0.4973 | 33.9 | 250 | 0.7187 | ### Framework versions - PEFT 0.4.0 - Transformers 4.38.2 - Pytorch 2.3.1+cu121 - Datasets 2.13.1 - Tokenizers 0.15.2
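For reference, the `bitsandbytes` settings listed in the card above map onto the following `transformers` configuration (a minimal sketch; it assumes `bitsandbytes` and `accelerate` are installed and that access to the gated Llama 2 base model has been granted):

```python
# Minimal sketch: the 4-bit NF4 quantization config from the card above,
# expressed as a transformers BitsAndBytesConfig.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_threshold=6.0,
)

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```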
Foodddd/Djdjsjdjd
Foodddd
"2024-06-22T20:39:47Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T20:38:09Z"
Entry not found
BrockTYS/Qwen1.5-0.5b
BrockTYS
"2024-06-22T20:45:43Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T20:45:43Z"
Entry not found
pamelaraya/ModelsPonyXL2
pamelaraya
"2024-06-22T22:28:42Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T20:47:31Z"
Entry not found
danielkosyra/polynomial_1450_7e-4_16b_w0.05
danielkosyra
"2024-06-22T20:47:56Z"
0
0
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "generated_from_trainer", "base_model:gpt2", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-06-22T20:47:37Z"
--- license: mit base_model: gpt2 tags: - generated_from_trainer model-index: - name: polynomial_1450_7e-4_16b_w0.05 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # polynomial_1450_7e-4_16b_w0.05 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.0237 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0007 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 10 - total_train_batch_size: 160 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: polynomial - lr_scheduler_warmup_steps: 250 - training_steps: 1450 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 9.0635 | 0.1029 | 50 | 7.2771 | | 6.7176 | 0.2058 | 100 | 6.2551 | | 6.0127 | 0.3088 | 150 | 5.7232 | | 5.5517 | 0.4117 | 200 | 5.3470 | | 5.2297 | 0.5146 | 250 | 5.0446 | | 4.9361 | 0.6175 | 300 | 4.7729 | | 4.6976 | 0.7205 | 350 | 4.5588 | | 4.497 | 0.8234 | 400 | 4.3733 | | 4.3221 | 0.9263 | 450 | 4.1939 | | 4.1357 | 1.0292 | 500 | 4.0081 | | 3.892 | 1.1322 | 550 | 3.8139 | | 3.7559 | 1.2351 | 600 | 3.6703 | | 3.6297 | 1.3380 | 650 | 3.5671 | | 3.5399 | 1.4409 | 700 | 3.4772 | | 3.4656 | 1.5438 | 750 | 3.4074 | | 3.3949 | 1.6468 | 800 | 3.3532 | | 3.3297 | 1.7497 | 850 | 3.3031 | | 3.2878 | 1.8526 | 900 | 3.2604 | | 3.254 | 1.9555 | 950 | 3.2267 | | 3.1231 | 2.0585 | 1000 | 3.1899 | | 3.0568 | 2.1614 | 1050 | 3.1603 | | 3.0347 | 2.2643 | 1100 | 3.1349 | | 3.0197 | 2.3672 | 1150 | 3.1148 | | 2.9893 | 2.4702 | 1200 | 3.0940 | | 2.9801 | 2.5731 | 1250 | 3.0725 | | 2.951 | 2.6760 | 1300 | 3.0551 | | 2.9265 | 2.7789 | 1350 | 3.0397 | | 2.9438 | 2.8818 | 1400 | 3.0299 | | 2.9292 | 2.9848 | 1450 | 3.0237 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
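For reference, the polynomial learning-rate schedule with warmup described above (lr 7e-4, 250 warmup steps, 1450 training steps) can be reproduced standalone with the `transformers` scheduler helper; a minimal sketch, where the single placeholder parameter stands in for the real model weights:

```python
# Minimal sketch: polynomial decay schedule with warmup, matching the
# hyperparameters in the card above. The parameter below is a placeholder.
import torch
from transformers import get_polynomial_decay_schedule_with_warmup

params = [torch.nn.Parameter(torch.zeros(1))]  # placeholder for model parameters
optimizer = torch.optim.AdamW(params, lr=7e-4, betas=(0.9, 0.999), eps=1e-8)
scheduler = get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps=250, num_training_steps=1450
)

for step in range(1450):
    optimizer.step()
    scheduler.step()
```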
clockpocket/Slime-RVCv2
clockpocket
"2024-06-22T20:51:59Z"
0
0
null
[ "license:wtfpl", "region:us" ]
null
"2024-06-22T20:50:09Z"
--- license: wtfpl ---
ProElectro07/PatDoc
ProElectro07
"2024-06-22T20:58:26Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T20:57:34Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Sinensis/DarkForest-20B-v3.0-bpw4.0-h6-exl2
Sinensis
"2024-06-22T21:05:28Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "merge", "not-for-all-audiences", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T20:59:07Z"
--- license: other tags: - merge - not-for-all-audiences license_name: microsoft-research-license --- [exllamav2](https://github.com/turboderp/exllamav2) quant of [TeeZee/DarkForest-20B-v3.0](https://huggingface.co/TeeZee/DarkForest-20B-v3.0) using default calibration. --- # DarkForest 20B v3.0 ![image/png](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/resolve/main/DarkForest-20B-v3.0.jpg) ## Model Details - To create this model, a five-step merge procedure was used. - The resulting model has approximately 20 billion parameters. - Details of the merge steps are in the following files: - [darkforest_v3_step1.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step1.yml) - [darkforest_v3_step2.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step2.yml) - [darkforest_v3_step3.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step3.yml) - [darkforest_v3_step4.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step4.yml) - [darkforest_v3_step5.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step5.yml) ## Models used - custom model, based on athirdpath/Orca-2-13b-Alpaca-Uncensored and KoboldAI/LLaMA2-13B-Erebus-v3 - BigMaid-20B-v2.0 - athirdpath/Harmonia-20B - athirdpath/Iambe-RP-v3-20b ## Models removed - jebcarter_psyonic-cetacean-20B ## Merge method - All merges were done in float32 precision; where applicable, the breadcrumbs_ties merge method was used. **Warning: This model can produce NSFW content!** ## Results - Main difference from v2.x: the model follows character cards and the user profile much better. - Produces SFW and NSFW content without issues and switches context seamlessly. - Good at following instructions. - Good at tracking multiple characters in one scene. - Very creative; the scenarios it produces are mature and complicated, and the model doesn't shy away from writing about PTSD, mental issues, or complicated relationships. - NSFW output is more creative and surprising than typical limaRP output. - Definitely for mature audiences, not only because of the vivid NSFW content but also because of the overall maturity of the stories it produces. - This is NOT Harry Potter level storytelling. All comments are greatly appreciated. Download, test, and if you appreciate my work, consider buying me my fuel: <a href="https://www.buymeacoffee.com/TeeZee" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>
acen20/Meta-Llama-3-8B-Q4_K_M-GGUF
acen20
"2024-06-22T21:02:14Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:02:14Z"
Entry not found
blockblockblock/TinyLlama_v1.1-bpw2.5-exl2
blockblockblock
"2024-06-22T21:07:23Z"
0
0
transformers
[ "transformers", "llama", "text-generation", "en", "dataset:cerebras/SlimPajama-627B", "arxiv:2401.02385", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "exl2", "region:us" ]
text-generation
"2024-06-22T21:07:01Z"
--- license: apache-2.0 datasets: - cerebras/SlimPajama-627B language: - en --- # TinyLlama-1.1B-v1.1 - **Codebase:** [github.com/jzhang38/TinyLlama](https://github.com/jzhang38/TinyLlama) - **Technical Report:** [arxiv.org/pdf/2401.02385](https://arxiv.org/pdf/2401.02385) <div align="center"> <img src="https://huggingface.co/PY007/TinyLlama-1.1B-intermediate-step-240k-503b/resolve/main/TinyLlama_logo.png" width="300"/> </div> We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama can be used as a plug-and-play replacement in many open-source projects built upon Llama. TinyLlama is also compact, with only 1.1B parameters, which lets it serve applications that demand a restricted computation and memory footprint. ## Overview In this project, rather than only training a single TinyLlama model, we first train TinyLlama on a corpus of 1.5 trillion tokens to obtain foundational language capabilities. Subsequently, we take this model and turn it into three different models by continual pretraining with three distinct data-sampling strategies. For a visual representation of this process, please refer to the figure below. ![Overview](overview.png) ## Pretraining Due to these issues ([bug1](https://whimsical-aphid-86d.notion.site/Release-of-TinyLlama-1-5T-Checkpoints-Postponed-01b266998c1c47f78f5ae1520196d194?pvs=4), [bug2](https://whimsical-aphid-86d.notion.site/2023-12-18-Updates-from-TinyLlama-Team-7d30c01fff794da28ccc952f327c8d4f)), we retrained TinyLlama to provide a better model. We trained the model on 2T tokens and divided pretraining into three stages: 1) basic pretraining, 2) continual pretraining on specific domains, and 3) cooldown. #### Basic pretraining In this initial phase, we trained the model on SlimPajama only to develop its commonsense reasoning capabilities. The model saw 1.5T tokens during this basic pretraining period. Since we used a cluster with 4 A100-40G GPUs per node and only shard model weights within a node, the batch size was limited to approximately 1.8M tokens. #### Continual pretraining with specific domain We incorporated three kinds of corpora during this stage: SlimPajama (the same as in the first phase), Math&Code (StarCoder and Proof Pile), and Chinese (SkyPile). This approach allowed us to develop three variant models with specialized capabilities. During the first ~6B tokens of this stage, we linearly increased the sampling proportion of the domain-specific corpora (excluding SlimPajama, which remained unchanged from stage 1). This sampling warmup was designed to gradually shift the distribution of the pretraining data, ensuring a more stable training process. After this warmup, we continued pretraining with a stable sampling strategy until reaching ~1.85T tokens. #### Cooldown Implementing a cooldown phase has become a crucial technique for achieving better model convergence at the end of pretraining. However, since we had already used a cosine learning-rate schedule from the beginning, it is difficult to lower the learning rate further for cooldown the way MiniCPM or DeepSeek do. Therefore, we cool down by adjusting the batch size instead: we increase it from 1.8M to 7.2M tokens while keeping the original cosine learning-rate schedule. #### TinyLlama model family Following this extensive pretraining process, we are now releasing three specialized versions of our model: 1. **TinyLlama_v1.1**: The standard version, for general-purpose use. 2. **TinyLlama_v1.1_Math&Code**: Equipped with better math and code abilities. 3. **TinyLlama_v1.1_Chinese**: Better understanding of Chinese. ## Data Here we list the data distribution in each stage: ### TinyLlama_v1.1 | Corpus | Basic pretraining | Continual pretraining with specific domain | Cooldown | | ------------- | ----------------- | ------------------------------------------ | -------- | | Slimpajama | 100.0 | 100.0 | 100.0 | ### TinyLlama_v1.1_math_code | Corpus | Basic pretraining | Continual pretraining with specific domain | Cooldown | | ------------- | ----------------- | ------------------------------------------ | -------- | | Slimpajama | 100.0 | 75.0 | 75.0 | | starcoder | - | 15.0 | 15.0 | | proof_pile | - | 10.0 | 10.0 | ### TinyLlama_v1.1_chinese | Corpus | Basic pretraining | Continual pretraining with specific domain | Cooldown | | ------------- | ----------------- | ------------------------------------------ | -------- | | Slimpajama | 100.0 | 50.0 | 50.0 | | skypile | - | 50.0 | 50.0 | ### How to use You will need transformers>=4.31. Check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information. ```python from transformers import AutoTokenizer import transformers import torch model = "TinyLlama/TinyLlama_v1.1" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) sequences = pipeline( 'The TinyLlama project aims to pretrain a 1.1B Llama model on 3 trillion tokens. With some proper optimization, we can achieve this within a span of "just" 90 days using 16 A100-40G GPUs πŸš€πŸš€. The training has started on 2023-09-01.', do_sample=True, top_k=10, num_return_sequences=1, repetition_penalty=1.5, eos_token_id=tokenizer.eos_token_id, max_length=500, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ### Eval | Model | Pretrain Tokens | HellaSwag | Obqa | WinoGrande | ARC_c | ARC_e | boolq | piqa | avg | | ----------------------------------------- | --------------- | --------- | --------- | ---------- | --------- | --------- | ----- | --------- | --------- | | Pythia-1.0B | 300B | 47.16 | 31.40 | 53.43 | 27.05 | 48.99 | 60.83 | 69.21 | 48.30 | | TinyLlama-1.1B-intermediate-step-1431k-3T | 3T | 59.20 | 36.00 | 59.12 | 30.12 | 55.25 | 57.83 | 73.29 | 52.99 | | TinyLlama-1.1B-v1.1 | 2T | **61.47** | **36.80** | 59.43 | 32.68 | **55.47** | 55.99 | **73.56** | 53.63 | | TinyLlama-1.1B-v1_math_code | 2T | 60.80 | 36.40 | **60.22** | **33.87** | 55.20 | 57.09 | 72.69 | **53.75** | | TinyLlama-1.1B-v1.1_chinese | 2T | 58.23 | 35.20 | 59.27 | 31.40 | 55.35 | **61.41** | 73.01 | 53.41 |
blockblockblock/gpt2-bpw6-exl2
blockblockblock
"2024-06-22T21:14:22Z"
0
0
transformers
[ "transformers", "tf", "jax", "tflite", "rust", "gpt2", "text-generation", "exbert", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "6-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T21:13:09Z"
--- language: en tags: - exbert license: mit --- # GPT-2 Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large Pretrained model on English text using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and first released at [this page](https://openai.com/blog/better-language-models/). Disclaimer: The team releasing GPT-2 also wrote a [model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card has been written by the Hugging Face team to complete the information they provided and give specific examples of bias. ## Model description GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), using an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. Inputs are sequences of continuous text of a certain length, and the targets are the same sequence shifted one token (word or piece of word) to the right. Internally, the model uses a mask mechanism to make sure the predictions for token `i` only use the inputs from `1` to `i` and not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is, however, best at what it was pretrained for: generating text from a prompt. This is the **smallest** version of GPT-2, with 124M parameters. **Related Models:** [GPT-Large](https://huggingface.co/gpt2-large), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-XL](https://huggingface.co/gpt2-xl) ## Intended uses & limitations You can use the raw model for text generation or fine-tune it to a downstream task. See the [model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you. ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."}, {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"}, {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"}, {'generated_text': "Hello, I'm a language model, a system model. I want to know my language so that it might be more interesting, more user-friendly"}, {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the OpenAI team themselves point out in their [model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases): > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases > that require the generated text to be true. > > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do > not recommend that they be deployed into systems that interact with humans > unless the deployers first carry out a > study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, > and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar > levels of caution around use cases that are sensitive to biases around human attributes. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("The White man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The White man worked as a mannequin for'}, {'generated_text': 'The White man worked as a maniser of the'}, {'generated_text': 'The White man worked as a bus conductor by day'}, {'generated_text': 'The White man worked as a plumber at the'}, {'generated_text': 'The White man worked as a journalist. He had'}] >>> set_seed(42) >>> generator("The Black man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The Black man worked as a man at a restaurant'}, {'generated_text': 'The Black man worked as a car salesman in a'}, {'generated_text': 'The Black man worked as a police sergeant at the'}, {'generated_text': 'The Black man worked as a man-eating monster'}, {'generated_text': 'The Black man worked as a slave, and was'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weighs 40GB of text but has not been publicly released. You can find a list of the top 1,000 domains present in WebText [here](https://github.com/openai/gpt-2/blob/master/domains.txt). ## Training procedure ### Preprocessing The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for Unicode characters) and a vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens. The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact details of training. ## Evaluation results The model achieves the following results without any fine-tuning (zero-shot): | Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW | |:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:| | (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) | | | 35.13 | 45.99 | 87.65 | 83.4 | 29.41 | 65.85 | 1.16 | 1.17 | 37.50 | 75.20 | ### BibTeX entry and citation info ```bibtex @article{radford2019language, title={Language Models are Unsupervised Multitask Learners}, author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya}, year={2019} } ``` <a href="https://huggingface.co/exbert/?model=gpt2"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a>
blockblockblock/gpt2-bpw5.5-exl2
blockblockblock
"2024-06-22T21:16:49Z"
0
0
transformers
[ "transformers", "tf", "jax", "tflite", "rust", "gpt2", "text-generation", "exbert", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "exl2", "region:us" ]
text-generation
"2024-06-22T21:15:32Z"
--- language: en tags: - exbert license: mit --- # GPT-2 Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and first released at [this page](https://openai.com/blog/better-language-models/). Disclaimer: The team releasing GPT-2 also wrote a [model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card has been written by the Hugging Face team to complete the information they provided and give specific examples of bias. ## Model description GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt. This is the **smallest** version of GPT-2, with 124M parameters. **Related Models:** [GPT-Large](https://huggingface.co/gpt2-large), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-XL](https://huggingface.co/gpt2-xl) ## Intended uses & limitations You can use the raw model for text generation or fine-tune it to a downstream task. See the [model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you. ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."}, {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"}, {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"}, {'generated_text': "Hello, I'm a language model, a system model. 
I want to know my language so that it might be more interesting, more user-friendly"}, {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the OpenAI team themselves point out in their [model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases): > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases > that require the generated text to be true. > > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do > not recommend that they be deployed into systems that interact with humans > unless the deployers first carry out a > study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, > and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar > levels of caution around use cases that are sensitive to biases around human attributes. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("The White man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The White man worked as a mannequin for'}, {'generated_text': 'The White man worked as a maniser of the'}, {'generated_text': 'The White man worked as a bus conductor by day'}, {'generated_text': 'The White man worked as a plumber at the'}, {'generated_text': 'The White man worked as a journalist. He had'}] >>> set_seed(42) >>> generator("The Black man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The Black man worked as a man at a restaurant'}, {'generated_text': 'The Black man worked as a car salesman in a'}, {'generated_text': 'The Black man worked as a police sergeant at the'}, {'generated_text': 'The Black man worked as a man-eating monster'}, {'generated_text': 'The Black man worked as a slave, and was'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weighs 40GB of text but has not been publicly released.
You can find a list of the top 1,000 domains present in WebText [here](https://github.com/openai/gpt-2/blob/master/domains.txt). ## Training procedure ### Preprocessing The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens. The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact details of training. ## Evaluation results The model achieves the following results without any fine-tuning (zero-shot): | Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW | |:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:| | (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) | | | 35.13 | 45.99 | 87.65 | 83.4 | 29.41 | 65.85 | 1.16 | 1.17 | 37.50 | 75.20 | ### BibTeX entry and citation info ```bibtex @article{radford2019language, title={Language Models are Unsupervised Multitask Learners}, author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya}, year={2019} } ``` <a href="https://huggingface.co/exbert/?model=gpt2"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a>
blockblockblock/gpt2-bpw5-exl2
blockblockblock
"2024-06-22T21:19:18Z"
0
0
transformers
[ "transformers", "tf", "jax", "tflite", "rust", "gpt2", "text-generation", "exbert", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "5-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T21:17:59Z"
--- language: en tags: - exbert license: mit --- # GPT-2 Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and first released at [this page](https://openai.com/blog/better-language-models/). Disclaimer: The team releasing GPT-2 also wrote a [model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card has been written by the Hugging Face team to complete the information they provided and give specific examples of bias. ## Model description GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt. This is the **smallest** version of GPT-2, with 124M parameters. **Related Models:** [GPT-Large](https://huggingface.co/gpt2-large), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-XL](https://huggingface.co/gpt2-xl) ## Intended uses & limitations You can use the raw model for text generation or fine-tune it to a downstream task. See the [model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you. ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."}, {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"}, {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"}, {'generated_text': "Hello, I'm a language model, a system model. 
I want to know my language so that it might be more interesting, more user-friendly"}, {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the OpenAI team themselves point out in their [model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases): > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases > that require the generated text to be true. > > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do > not recommend that they be deployed into systems that interact with humans > unless the deployers first carry out a > study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, > and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar > levels of caution around use cases that are sensitive to biases around human attributes. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("The White man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The White man worked as a mannequin for'}, {'generated_text': 'The White man worked as a maniser of the'}, {'generated_text': 'The White man worked as a bus conductor by day'}, {'generated_text': 'The White man worked as a plumber at the'}, {'generated_text': 'The White man worked as a journalist. He had'}] >>> set_seed(42) >>> generator("The Black man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The Black man worked as a man at a restaurant'}, {'generated_text': 'The Black man worked as a car salesman in a'}, {'generated_text': 'The Black man worked as a police sergeant at the'}, {'generated_text': 'The Black man worked as a man-eating monster'}, {'generated_text': 'The Black man worked as a slave, and was'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weighs 40GB of text but has not been publicly released.
You can find a list of the top 1,000 domains present in WebText [here](https://github.com/openai/gpt-2/blob/master/domains.txt). ## Training procedure ### Preprocessing The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens. The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact details of training. ## Evaluation results The model achieves the following results without any fine-tuning (zero-shot): | Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW | |:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:| | (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) | | | 35.13 | 45.99 | 87.65 | 83.4 | 29.41 | 65.85 | 1.16 | 1.17 | 37.50 | 75.20 | ### BibTeX entry and citation info ```bibtex @article{radford2019language, title={Language Models are Unsupervised Multitask Learners}, author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya}, year={2019} } ``` <a href="https://huggingface.co/exbert/?model=gpt2"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a>
manbeast3b/KinoInferTry10
manbeast3b
"2024-06-22T21:21:07Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:21:02Z"
Entry not found
AmiraMohamed/results
AmiraMohamed
"2024-06-22T21:23:12Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:23:12Z"
Entry not found
kanishka/smolm-autoreg-bpe-counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new-1e-4
kanishka
"2024-06-23T20:04:41Z"
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "opt", "text-generation", "generated_from_trainer", "dataset:kanishka/counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-06-22T21:23:34Z"
--- tags: - generated_from_trainer datasets: - kanishka/counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new metrics: - accuracy model-index: - name: smolm-autoreg-bpe-counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new-1e-4 results: - task: name: Causal Language Modeling type: text-generation dataset: name: kanishka/counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new type: kanishka/counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new metrics: - name: Accuracy type: accuracy value: 0.4063316905647398 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # smolm-autoreg-bpe-counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new-1e-4 This model was trained from scratch on the kanishka/counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal_new dataset. It achieves the following results on the evaluation set: - Loss: 3.4333 - Accuracy: 0.4063 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 32000 - num_epochs: 20.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:------:|:---------------:|:--------:| | 4.0479 | 1.0 | 18600 | 4.2935 | 0.3070 | | 3.5674 | 2.0 | 37200 | 3.7738 | 0.3611 | | 3.3916 | 3.0 | 55800 | 3.6022 | 0.3786 | | 3.2862 | 4.0 | 74400 | 3.5242 | 0.3885 | | 3.2206 | 5.0 | 93000 | 3.4933 | 0.3925 | | 3.1712 | 6.0 | 111600 | 3.4670 | 0.3960 | | 3.1308 | 7.0 | 130200 | 3.4515 | 0.3982 | | 3.0923 | 8.0 | 148800 | 3.4287 | 0.4002 | | 3.0627 | 9.0 | 167400 | 3.4128 | 0.4021 | | 3.0371 | 10.0 | 186000 | 3.4146 | 0.4029 | | 3.0079 | 11.0 | 204600 | 3.4136 | 0.4033 | | 2.9826 | 12.0 | 223200 | 3.4180 | 0.4040 | | 2.9648 | 13.0 | 241800 | 3.3980 | 0.4056 | | 2.9463 | 14.0 | 260400 | 3.4089 | 0.4059 | | 2.9268 | 15.0 | 279000 | 3.4190 | 0.4056 | | 2.9079 | 16.0 | 297600 | 3.4242 | 0.4058 | | 2.8863 | 17.0 | 316200 | 3.4218 | 0.4062 | | 2.8721 | 18.0 | 334800 | 3.4296 | 0.4062 | | 2.8514 | 19.0 | 353400 | 3.4306 | 0.4064 | | 2.8356 | 20.0 | 372000 | 3.4333 | 0.4063 | ### Framework versions - Transformers 4.38.0 - Pytorch 2.3.1+cu121 - Datasets 2.16.1 - Tokenizers 0.15.2
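The optimizer and scheduler rows in the hyperparameter list above map directly onto standard PyTorch and `transformers` utilities. A minimal sketch, not the original training script; the `torch.nn.Linear` below is only a stand-in for the OPT model so the snippet runs:

```python
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(8, 8)  # stand-in for the OPT model being trained

# lr 1e-4 with the Adam betas/epsilon from the table above
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4,
                              betas=(0.9, 0.999), eps=1e-8)

# linear schedule, 32,000 warmup steps;
# 20 epochs x 18,600 steps/epoch = 372,000 total steps (see the results table)
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps=32_000,
                                            num_training_steps=372_000)
```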
f4b1an/leaders
f4b1an
"2024-06-22T21:26:55Z"
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
"2024-06-22T21:25:25Z"
--- license: creativeml-openrail-m ---
kanishka/smolm-autoreg-bpe-counterfactual_babylm_measure_nps_as_singular_new-seed_1024-1e-3
kanishka
"2024-06-28T01:45:41Z"
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "opt", "text-generation", "generated_from_trainer", "dataset:kanishka/counterfactual_babylm_measure_nps_as_singular_new", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-06-22T21:28:30Z"
--- tags: - generated_from_trainer datasets: - kanishka/counterfactual_babylm_measure_nps_as_singular_new metrics: - accuracy model-index: - name: smolm-autoreg-bpe-counterfactual_babylm_measure_nps_as_singular_new-seed_1024-1e-3 results: - task: name: Causal Language Modeling type: text-generation dataset: name: kanishka/counterfactual_babylm_measure_nps_as_singular_new type: kanishka/counterfactual_babylm_measure_nps_as_singular_new metrics: - name: Accuracy type: accuracy value: 0.40919192430156276 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # smolm-autoreg-bpe-counterfactual_babylm_measure_nps_as_singular_new-seed_1024-1e-3 This model was trained from scratch on the kanishka/counterfactual_babylm_measure_nps_as_singular_new dataset. It achieves the following results on the evaluation set: - Loss: 3.4280 - Accuracy: 0.4092 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 64 - seed: 1024 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 32000 - num_epochs: 20.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:------:|:---------------:|:--------:| | 3.5991 | 1.0 | 18602 | 3.7770 | 0.3585 | | 3.3808 | 2.0 | 37204 | 3.5764 | 0.3802 | | 3.2544 | 3.0 | 55806 | 3.4559 | 0.3923 | | 3.168 | 4.0 | 74408 | 3.4507 | 0.3977 | | 3.1158 | 5.0 | 93010 | 3.4035 | 0.4024 | | 3.0757 | 6.0 | 111612 | 3.3938 | 0.4040 | | 3.0394 | 7.0 | 130214 | 3.3852 | 0.4048 | | 3.0038 | 8.0 | 148816 | 3.3858 | 0.4069 | | 2.9731 | 9.0 | 167418 | 3.3887 | 0.4073 | | 2.9471 | 10.0 | 186020 | 3.3704 | 0.4084 | | 2.9246 | 11.0 | 204622 | 3.3703 | 0.4090 | | 2.904 | 12.0 | 223224 | 3.3839 | 0.4087 | | 2.8815 | 13.0 | 241826 | 3.3820 | 0.4095 | | 2.8618 | 14.0 | 260428 | 3.3779 | 0.4094 | | 2.8451 | 15.0 | 279030 | 3.4014 | 0.4096 | | 2.8223 | 16.0 | 297632 | 3.4042 | 0.4095 | | 2.8049 | 17.0 | 316234 | 3.4128 | 0.4093 | | 2.7864 | 18.0 | 334836 | 3.4110 | 0.4092 | | 2.7622 | 19.0 | 353438 | 3.4278 | 0.4091 | | 2.7455 | 20.0 | 372040 | 3.4280 | 0.4092 | ### Framework versions - Transformers 4.38.0 - Pytorch 2.3.1+cu121 - Datasets 2.16.1 - Tokenizers 0.15.2
jenniecoveria/CHAEYOUNG_MODEL
jenniecoveria
"2024-06-22T21:36:18Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:35:15Z"
Entry not found
fruk19/E_SMALL
fruk19
"2024-06-22T21:36:18Z"
0
0
transformers
[ "transformers", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T21:36:15Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Fischerboot/sophie-2epochs
Fischerboot
"2024-06-22T21:49:09Z"
0
0
peft
[ "peft", "llama", "generated_from_trainer", "base_model:Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge", "4-bit", "bitsandbytes", "region:us" ]
null
"2024-06-22T21:36:16Z"
--- base_model: Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge library_name: peft tags: - generated_from_trainer model-index: - name: outputs/128-rank-2-epochs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml base_model: Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge model_type: LlamaForCausalLM tokenizer_type: AutoTokenizer load_in_8bit: false load_in_4bit: true strict: false chat_template: llama3 datasets: - path: Fischerboot/newnewdataset-sophie type: sharegpt - path: PJMixers/grimulkan_theory-of-mind-ShareGPT type: sharegpt conversation: llama3 dataset_prepared_path: last_run_prepared val_set_size: 0.1 output_dir: ./outputs/128-rank-2-epochs adapter: qlora lora_model_dir: sequence_len: 128 sample_packing: false pad_to_sequence_len: true lora_r: 128 lora_alpha: 64 lora_dropout: 0.05 lora_target_linear: true lora_fan_in_fan_out: lora_target_modules: - gate_proj - down_proj - up_proj - q_proj - v_proj - k_proj - o_proj wandb_project: wandb_entity: wandb_watch: wandb_name: wandb_log_model: gradient_accumulation_steps: 1 micro_batch_size: 1 num_epochs: 2 optimizer: adamw_bnb_8bit lr_scheduler: cosine learning_rate: 0.0002 train_on_inputs: false group_by_length: false bf16: auto fp16: tf32: false gradient_checkpointing: true early_stopping_patience: resume_from_checkpoint: local_rank: logging_steps: 1 xformers_attention: flash_attention: true loss_watchdog_threshold: 8.0 loss_watchdog_patience: 3 eval_sample_packing: false warmup_steps: 10 evals_per_epoch: 4 eval_table_size: eval_max_new_tokens: 128 saves_per_epoch: 1 debug: deepspeed: weight_decay: 0.0 fsdp: fsdp_config: special_tokens: bos_token: "<|begin_of_text|>" eos_token: "<|end_of_text|>" pad_token: "<|end_of_text|>" ``` </details><br> # outputs/128-rank-2-epochs This model is a fine-tuned version of [Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge](https://huggingface.co/Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2742 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 10 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 6.1066 | 0.0034 | 1 | 6.0671 | | 0.2094 | 0.2526 | 74 | 0.4514 | | 0.233 | 0.5051 | 148 | 0.4296 | | 0.1455 | 0.7577 | 222 | 0.3652 | | 0.3526 | 1.0102 | 296 | 0.3093 | | 0.1945 | 1.2628 | 370 | 0.2894 | | 0.2446 | 1.5154 | 444 | 0.2882 | | 0.2686 | 1.7679 | 518 | 0.2742 | ### Framework versions - PEFT 0.11.1 - Transformers 4.41.1 - Pytorch 2.1.2+cu118 - Datasets 2.19.1 - Tokenizers 0.19.1
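For readers reproducing the adapter outside axolotl, the LoRA block of the config above corresponds to a `peft` `LoraConfig`. A minimal sketch, assuming `peft` is installed; only the values shown in the axolotl config are taken from the card:

```python
from peft import LoraConfig

# Rank-128 QLoRA adapter over all Llama linear projections, as configured above
lora_config = LoraConfig(
    r=128,
    lora_alpha=64,
    lora_dropout=0.05,
    target_modules=["gate_proj", "down_proj", "up_proj",
                    "q_proj", "v_proj", "k_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
print(lora_config)
```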
Fischerboot/sophie-3epochs
Fischerboot
"2024-06-22T22:01:20Z"
0
0
peft
[ "peft", "llama", "generated_from_trainer", "base_model:Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge", "4-bit", "bitsandbytes", "region:us" ]
null
"2024-06-22T21:36:30Z"
--- base_model: Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge library_name: peft tags: - generated_from_trainer model-index: - name: outputs/128-rank-3-epochs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml base_model: Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge model_type: LlamaForCausalLM tokenizer_type: AutoTokenizer load_in_8bit: false load_in_4bit: true strict: false chat_template: llama3 datasets: - path: Fischerboot/newnewdataset-sophie type: sharegpt - path: PJMixers/grimulkan_theory-of-mind-ShareGPT type: sharegpt conversation: llama3 dataset_prepared_path: last_run_prepared val_set_size: 0.1 output_dir: ./outputs/128-rank-3-epochs adapter: qlora lora_model_dir: sequence_len: 128 sample_packing: false pad_to_sequence_len: true lora_r: 128 lora_alpha: 64 lora_dropout: 0.05 lora_target_linear: true lora_fan_in_fan_out: lora_target_modules: - gate_proj - down_proj - up_proj - q_proj - v_proj - k_proj - o_proj wandb_project: wandb_entity: wandb_watch: wandb_name: wandb_log_model: gradient_accumulation_steps: 1 micro_batch_size: 1 num_epochs: 3 optimizer: adamw_bnb_8bit lr_scheduler: cosine learning_rate: 0.0002 train_on_inputs: false group_by_length: false bf16: auto fp16: tf32: false gradient_checkpointing: true early_stopping_patience: resume_from_checkpoint: local_rank: logging_steps: 1 xformers_attention: flash_attention: true loss_watchdog_threshold: 8.0 loss_watchdog_patience: 3 eval_sample_packing: false warmup_steps: 10 evals_per_epoch: 4 eval_table_size: eval_max_new_tokens: 128 saves_per_epoch: 1 debug: deepspeed: weight_decay: 0.0 fsdp: fsdp_config: special_tokens: bos_token: "<|begin_of_text|>" eos_token: "<|end_of_text|>" pad_token: "<|end_of_text|>" ``` </details><br> # outputs/128-rank-3-epochs This model is a fine-tuned version of [Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge](https://huggingface.co/Fischerboot/LLama3-Lexi-Aura-3Some-SLERP-SLERP-ql-merge) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 0.2591 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 10 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 6.1066 | 0.0034 | 1 | 6.0671 | | 0.1798 | 0.2526 | 74 | 0.5265 | | 0.2133 | 0.5051 | 148 | 0.3970 | | 0.1422 | 0.7577 | 222 | 0.3722 | | 0.4075 | 1.0102 | 296 | 0.3192 | | 0.2602 | 1.2628 | 370 | 0.3238 | | 0.3284 | 1.5154 | 444 | 0.3140 | | 0.3468 | 1.7679 | 518 | 0.3076 | | 0.2113 | 2.0205 | 592 | 0.2838 | | 0.1598 | 2.2730 | 666 | 0.2659 | | 0.1228 | 2.5256 | 740 | 0.2609 | | 0.1164 | 2.7782 | 814 | 0.2591 | ### Framework versions - PEFT 0.11.1 - Transformers 4.41.1 - Pytorch 2.1.2+cu118 - Datasets 2.19.1 - Tokenizers 0.19.1
Susanta21/airesume
Susanta21
"2024-06-22T21:36:39Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:36:39Z"
Entry not found
odelz/eng_fb1mms_balancedv2
odelz
"2024-06-22T21:38:02Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:38:02Z"
Entry not found
MalakBasaad/WizardLM-13b-V1.2-Generating-Typo-Squatted-Domain-names
MalakBasaad
"2024-06-26T03:15:15Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T21:39:40Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
ErikGG64/Kurt_1994_Studio_Voice
ErikGG64
"2024-06-22T21:46:18Z"
0
0
null
[ "license:openrail", "region:us" ]
null
"2024-06-22T21:42:09Z"
--- license: openrail ---
JemimaA/fifa-regression-ensemble
JemimaA
"2024-06-22T22:17:33Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:43:20Z"
Entry not found
Milllllllladddddd/Aaas
Milllllllladddddd
"2024-06-22T21:44:12Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T21:44:12Z"
Entry not found
Danikdsa/JENNIE
Danikdsa
"2024-06-22T21:54:02Z"
0
0
null
[ "license:openrail", "region:us" ]
null
"2024-06-22T21:53:38Z"
--- license: openrail ---
RimZrelli/CTL_12Fold_LLAMA3_BIG
RimZrelli
"2024-06-22T21:58:32Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:unsloth/llama-3-8b-Instruct-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2024-06-22T21:54:19Z"
--- base_model: unsloth/llama-3-8b-Instruct-bnb-4bit language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl - sft --- # Uploaded model - **Developed by:** RimZrelli - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
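A minimal loading sketch in the same Unsloth style the model was trained with; the repo id follows this card, while `max_seq_length` is an assumption not stated above:

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="RimZrelli/CTL_12Fold_LLAMA3_BIG",  # this repo
    max_seq_length=2048,                           # assumed; not stated in the card
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's fast inference path
```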
odelz/hindi_fb1mms_unbalanced
odelz
"2024-06-23T01:28:19Z"
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2024-06-22T21:58:53Z"
Entry not found
diepala/rl_course_vizdoom_health_gathering_supreme
diepala
"2024-06-22T22:03:13Z"
0
0
sample-factory
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
"2024-06-22T21:59:19Z"
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 14.10 +/- 5.14 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r diepala/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
AIModelTechnologies/Bad-Bunny
AIModelTechnologies
"2024-06-23T04:42:27Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:01:58Z"
Entry not found
nolangclem/juggerXL_inpaint
nolangclem
"2024-06-22T22:02:43Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:02:43Z"
Entry not found
markveillette/unet-vda24
markveillette
"2024-06-22T22:03:36Z"
0
0
null
[ "license:mit", "region:us" ]
null
"2024-06-22T22:03:36Z"
--- license: mit ---
izzy2pm/LfromINFINITE
izzy2pm
"2024-06-22T22:10:51Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:07:50Z"
Entry not found
usuario101/vcard
usuario101
"2024-06-22T22:09:56Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:09:55Z"
Entry not found
MaryamMaksour/distilhubert-finetuned-gtzan
MaryamMaksour
"2024-06-23T00:08:28Z"
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "hubert", "audio-classification", "generated_from_trainer", "dataset:marsyas/gtzan", "base_model:ntu-spml/distilhubert", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
audio-classification
"2024-06-22T22:11:34Z"
--- license: apache-2.0 base_model: ntu-spml/distilhubert tags: - generated_from_trainer datasets: - marsyas/gtzan metrics: - accuracy model-index: - name: distilhubert-finetuned-gtzan results: - task: name: Audio Classification type: audio-classification dataset: name: GTZAN type: marsyas/gtzan config: all split: train args: all metrics: - name: Accuracy type: accuracy value: 0.82 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilhubert-finetuned-gtzan This model is a fine-tuned version of [ntu-spml/distilhubert](https://huggingface.co/ntu-spml/distilhubert) on the GTZAN dataset. It achieves the following results on the evaluation set: - Loss: 0.6202 - Accuracy: 0.82 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.9394 | 1.0 | 113 | 1.8333 | 0.53 | | 1.2177 | 2.0 | 226 | 1.2561 | 0.66 | | 1.0335 | 3.0 | 339 | 0.9996 | 0.72 | | 0.7175 | 4.0 | 452 | 0.8751 | 0.72 | | 0.432 | 5.0 | 565 | 0.7132 | 0.78 | | 0.3374 | 6.0 | 678 | 0.5706 | 0.82 | | 0.2082 | 7.0 | 791 | 0.6468 | 0.8 | | 0.1584 | 8.0 | 904 | 0.6253 | 0.83 | | 0.1323 | 9.0 | 1017 | 0.6019 | 0.84 | | 0.0957 | 10.0 | 1130 | 0.6202 | 0.82 | ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
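For inference, the fine-tuned checkpoint drops straight into the `audio-classification` pipeline. A minimal sketch; the audio path is a hypothetical local file:

```python
from transformers import pipeline

classifier = pipeline("audio-classification",
                      model="MaryamMaksour/distilhubert-finetuned-gtzan")

# 'track.wav' is a placeholder; GTZAN-style inputs are ~30 s music clips
print(classifier("track.wav", top_k=3))
```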
GeorgiosChris/Thesis
GeorgiosChris
"2024-06-25T11:34:39Z"
0
0
null
[ "safetensors", "license:apache-2.0", "region:us" ]
null
"2024-06-22T22:11:36Z"
--- license: apache-2.0 ---
anon11112/style
anon11112
"2024-06-22T22:24:39Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:11:57Z"
Entry not found
pinkieseb/nutrition_model
pinkieseb
"2024-06-22T22:15:19Z"
0
0
transformers
[ "transformers", "pytorch", "onnx", "endpoints_compatible", "region:us" ]
null
"2024-06-22T22:14:22Z"
Entry not found
Sinensis/DarkForest-20B-v3.0-bpw5.0-h6-exl2
Sinensis
"2024-06-22T22:20:19Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "merge", "not-for-all-audiences", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "5-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T22:14:35Z"
--- license: other tags: - merge - not-for-all-audiences license_name: microsoft-research-license --- [exllamav2](https://github.com/turboderp/exllamav2) quant of [TeeZee/DarkForest-20B-v3.0](https://huggingface.co/TeeZee/DarkForest-20B-v3.0) using default calibration. --- # DarkForest 20B v3.0 ![image/png](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/resolve/main/DarkForest-20B-v3.0.jpg) ## Model Details - To create this model, a five-step merge procedure was used. - The resulting model has approximately 20 billion parameters. - Details of the merge steps are in these files: - [darkforest_v3_step1.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step1.yml) - [darkforest_v3_step2.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step2.yml) - [darkforest_v3_step3.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step3.yml) - [darkforest_v3_step4.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step4.yml) - [darkforest_v3_step5.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step5.yml) ## Models used - custom model, based on athirdpath/Orca-2-13b-Alpaca-Uncensored and KoboldAI/LLaMA2-13B-Erebus-v3 - BigMaid-20B-v2.0 - athirdpath/Harmonia-20B - athirdpath/Iambe-RP-v3-20b ## Models removed - jebcarter_psyonic-cetacean-20B ## Merge method - all merges were done in float32 precision; where applicable, the breadcrumbs_ties merge method was used. **Warning: This model can produce NSFW content!** ## Results - main difference to v2.x: the model follows character cards and the user profile much better. - produces SFW and NSFW content without issues, and switches context seamlessly. - good at following instructions. - good at tracking multiple characters in one scene. - very creative: scenarios produced are mature and complicated, and the model doesn't shy away from writing about PTSD, mental issues or complicated relationships. - NSFW output is more creative and surprising than typical limaRP output. - definitely for mature audiences, not only because of the vivid NSFW content but also because of the overall maturity of the stories it produces. - This is NOT Harry Potter level storytelling. All comments are greatly appreciated; download, test, and if you appreciate my work, consider buying me my fuel: <a href="https://www.buymeacoffee.com/TeeZee" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>
Giggimx/Io
Giggimx
"2024-06-22T22:18:56Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:18:56Z"
Entry not found
mformoso/ImpuestosUNT
mformoso
"2024-06-22T22:20:42Z"
0
0
null
[ "license:llama3", "region:us" ]
null
"2024-06-22T22:20:42Z"
--- license: llama3 ---
ovite/Whisper-new
ovite
"2024-06-22T22:26:01Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:26:01Z"
Entry not found
C-Ilyas/whisper-base-darija
C-Ilyas
"2024-06-22T22:27:00Z"
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "ar", "base_model:openai/whisper-base", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2024-06-22T22:26:45Z"
--- language: - ar license: apache-2.0 base_model: openai/whisper-base tags: - generated_from_trainer model-index: - name: Whisper-Base-Darija results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper-Base-Darija This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the Algerian Darija Dialect dataset. It achieves the following results on the evaluation set: - eval_loss: 2.0852 - eval_wer: 243.5823 - eval_runtime: 210.226 - eval_samples_per_second: 0.376 - eval_steps_per_second: 0.376 - epoch: 100.0 - step: 2000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 300 - training_steps: 3000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
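A minimal transcription sketch via the `automatic-speech-recognition` pipeline; the clip name is a hypothetical local file, and given the WER reported above the output should be treated as a rough draft:

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition",
               model="C-Ilyas/whisper-base-darija")

print(asr("darija_clip.wav")["text"])  # placeholder audio file
```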
SimoLM/omis
SimoLM
"2024-06-22T22:28:40Z"
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "mistral", "trl", "en", "base_model:unsloth/phi-3-medium-4k-instruct-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2024-06-22T22:28:10Z"
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - mistral - trl base_model: unsloth/phi-3-medium-4k-instruct-bnb-4bit --- # Uploaded model - **Developed by:** tferdi - **License:** apache-2.0 - **Finetuned from model :** unsloth/phi-3-medium-4k-instruct-bnb-4bit This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
Sonikqu/imirr
Sonikqu
"2024-06-22T22:29:08Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:29:08Z"
Entry not found
pamelaraya/ModelsXL
pamelaraya
"2024-06-22T22:39:00Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:29:55Z"
Entry not found
Astral-P/CheramiLeigh
Astral-P
"2024-06-22T22:33:41Z"
0
0
null
[ "license:wtfpl", "region:us" ]
null
"2024-06-22T22:30:09Z"
--- license: wtfpl ---
Wilaime/model_transfer_football_fake_news
Wilaime
"2024-06-22T22:32:37Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:30:19Z"
Entry not found
IMMAYAN/Analizar_Sentimientos
IMMAYAN
"2024-06-22T22:50:34Z"
0
0
null
[ "license:cc-by-nc-4.0", "region:us" ]
null
"2024-06-22T22:33:30Z"
--- title: AnalizarSentimiento emoji: πŸ“ˆ colorFrom: gray colorTo: indigo sdk: gradio sdk_version: 4.36.1 app_file: app.py pinned: false license: cc-by-nc-4.0 --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
surya-narayanan/business
surya-narayanan
"2024-06-22T23:47:30Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T22:37:41Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
NKlauu/teste
NKlauu
"2024-06-22T22:40:37Z"
0
0
null
[ "license:unknown", "region:us" ]
null
"2024-06-22T22:40:37Z"
--- license: unknown ---
Pat2004/Test
Pat2004
"2024-06-23T22:36:22Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:42:35Z"
--- license: apache-2.0 ---
lemonater/aktivist
lemonater
"2024-06-22T22:44:08Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:44:08Z"
Entry not found
fivi-luffy/test-model
fivi-luffy
"2024-06-22T22:47:29Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:44:30Z"
--- license: mit --- # Sample README FOR MY MODEL So this is my first model
Sonikqu/imirr_ai
Sonikqu
"2024-06-22T22:50:06Z"
0
0
transformers
[ "transformers", "tensorboard", "license:wtfpl", "endpoints_compatible", "region:us" ]
null
"2024-06-22T22:46:29Z"
--- license: wtfpl ---
chrisgio/narrator
chrisgio
"2024-06-22T22:49:57Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:49:17Z"
Entry not found
aaalby/ethan1
aaalby
"2024-06-22T22:51:59Z"
0
0
null
[ "license:openrail", "region:us" ]
null
"2024-06-22T22:51:00Z"
--- license: openrail ---
valerielucro/mistral_gsm8k_dpo_cot_beta_0.3
valerielucro
"2024-06-22T22:55:22Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T22:55:14Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
ceciliaokugo43/gamingstory
ceciliaokugo43
"2024-06-22T22:56:17Z"
0
0
null
[ "license:artistic-2.0", "region:us" ]
null
"2024-06-22T22:56:17Z"
--- license: artistic-2.0 ---
woweenie/v70-ds21-main2-5e6-cd0.02-22kresume-11k-1.5e6-22k-7e7cos-13k-half
woweenie
"2024-06-22T23:00:07Z"
0
0
diffusers
[ "diffusers", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2024-06-22T22:57:15Z"
Entry not found
Frixi/BadBunny_2024
Frixi
"2024-06-29T17:18:19Z"
0
0
null
[ "license:openrail", "region:us" ]
null
"2024-06-22T22:57:31Z"
--- license: openrail ---
SoyTuTilin/Tilinazo
SoyTuTilin
"2024-06-22T22:58:14Z"
0
0
null
[ "license:apache-2.0", "region:us" ]
null
"2024-06-22T22:58:14Z"
--- license: apache-2.0 ---
Brahim86599/Words
Brahim86599
"2024-06-22T22:58:21Z"
0
0
null
[ "license:apache-2.0", "region:us" ]
null
"2024-06-22T22:58:21Z"
--- license: apache-2.0 ---
IKenzoI/Ski
IKenzoI
"2024-06-22T22:58:52Z"
0
0
null
[ "region:us" ]
null
"2024-06-22T22:58:52Z"
Entry not found
Khaliladib/llama-3-8b-instruct-vocab-checker
Khaliladib
"2024-06-23T09:29:47Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-06-22T23:01:43Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a πŸ€— transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
tom1-ll/LilyFe3O4Breakera
tom1-ll
"2024-06-22T23:05:31Z"
0
0
null
[ "license:openrail", "region:us" ]
null
"2024-06-22T23:04:00Z"
--- license: openrail ---
Sinensis/DarkForest-20B-v3.0-bpw6.0-h8-exl2
Sinensis
"2024-06-22T23:15:53Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "merge", "not-for-all-audiences", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "6-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T23:09:45Z"
--- license: other tags: - merge - not-for-all-audiences license_name: microsoft-research-license --- [exllamav2](https://github.com/turboderp/exllamav2) quant of [TeeZee/DarkForest-20B-v3.0](https://huggingface.co/TeeZee/DarkForest-20B-v3.0) using default calibration. --- # DarkForest 20B v3.0 ![image/png](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/resolve/main/DarkForest-20B-v3.0.jpg) ## Model Details - A five-step procedure was used to create this model. - The resulting model has approximately 20 billion parameters. - Details of the merge steps are in these files: - [darkforest_v3_step1.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step1.yml) - [darkforest_v3_step2.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step2.yml) - [darkforest_v3_step3.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step3.yml) - [darkforest_v3_step4.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step4.yml) - [darkforest_v3_step5.yml](https://huggingface.co/TeeZee/DarkForest-20B-v3.0/blob/main/darkforest_v3_step5.yml) ## Models used - a custom model based on athirdpath/Orca-2-13b-Alpaca-Uncensored and KoboldAI/LLaMA2-13B-Erebus-v3 - BigMaid-20B-v2.0 - athirdpath/Harmonia-20B - athirdpath/Iambe-RP-v3-20b ## Models removed - jebcarter_psyonic-cetacean-20B ## Merge method - All merges were done in float32 precision; where applicable, the breadcrumbs_ties merge method was used (a hypothetical config is sketched after this entry). **Warning: This model can produce NSFW content!** ## Results - The main difference from v2.x: the model follows character cards, and the user profile, much better. - Produces SFW and NSFW content without issues and switches context seamlessly. - Good at following instructions. - Good at tracking multiple characters in one scene. - Very creative; the scenarios produced are mature and complicated, and the model doesn't shy away from writing about PTSD, mental issues, or complicated relationships. - NSFW output is more creative and surprising than typical limaRP output. - Definitely for mature audiences, not only because of the vivid NSFW content but also because of the overall maturity of the stories it produces. - This is NOT Harry Potter level storytelling. All comments are greatly appreciated; download, test, and if you appreciate my work, consider buying me my fuel: <a href="https://www.buymeacoffee.com/TeeZee" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>
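For illustration, here is a minimal sketch of what a single breadcrumbs_ties step might look like in mergekit's YAML format, the same format as the darkforest_v3_step*.yml files linked above. The model names, weights, densities, and gamma below are placeholders chosen for the example, not the actual DarkForest recipe; consult the linked step files for the real parameters.

```yaml
# Hypothetical single merge step in mergekit YAML form.
# Placeholder values only -- NOT the actual DarkForest recipe
# (see the linked darkforest_v3_step*.yml files for the real steps).
merge_method: breadcrumbs_ties
base_model: athirdpath/Harmonia-20B     # placeholder base
models:
  - model: athirdpath/Iambe-RP-v3-20b   # placeholder donor model
    parameters:
      weight: 0.5     # contribution of this model's task vector
      density: 0.9    # fraction of delta parameters kept
      gamma: 0.01     # fraction of largest-magnitude deltas dropped
dtype: float32        # merges done in float32 precision, as stated above
```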
lunnar/nbayoungboy
lunnar
"2024-06-22T23:32:36Z"
0
0
null
[ "license:openrail", "region:us" ]
null
"2024-06-22T23:31:41Z"
--- license: openrail ---
primetimetran/her-breasts-friend
primetimetran
"2024-06-26T06:47:55Z"
0
0
null
[ "image classification", "classification", "medical imaging", "medical", "dicom", "cancer", "en", "license:mit", "region:us" ]
null
"2024-06-22T23:34:53Z"
--- license: mit language: - en tags: - image classification - classification - medical imaging - medical - dicom - cancer metrics: - '62% Sensitivity' --- # HerBreastsFriend (HBF) ![Demo](./assets/imgs-preview.gif) A model for identifying breast cancer in patients, inspired by a study conducted by Duke and blogged about by jamanetwork[^1]. The study's finding was that there is a lot of room for improvement; the authors reached this conclusion after building their own AI model for breast cancer detection/prognosis, which achieved 65% sensitivity. ### Details ![Demo](./assets/matrix-previews.gif) - KNN strategy - n_neighbors=5 - StandardScaler - PCA - n_components=2 (see the sketch after this entry) - Trained on a limited dataset (1997 images) - I had to limit the number of data points because my machine kept freezing; a solution is WIP. - Hosted by the amazing cancerimagingarchive[^2] ### Classification Report The initial release of HBF scored a 62% weighted average across all classes in our classification report. A lot of room for improvement.
```sh
              precision    recall  f1-score   support

Normal 0           0.62      0.80      0.70       956
Actionable 1       0.61      0.58      0.59       760
Benign 2           0.69      0.07      0.12       164
Cancer 3           0.47      0.08      0.13       117

accuracy                               0.62      1997
macro avg          0.60      0.38      0.39      1997
weighted avg       0.61      0.62      0.58      1997
```
### FAQ I'm considering making this open source. If you'd like to contribute, please give a star to let me know there are others interested. [^1]: Duke Study https://jamanetwork.com/journals/jamanetworkopen/fullarticle/2783046 [^2]: cancerimagingarchive https://www.cancerimagingarchive.net/collection/breast-cancer-screening-dbt (see also https://www.breastcancer.org/facts-statistics) A study conducted by Duke University Health System concluded that "deep learning" and "medical imaging in general" still have significant advances ahead of them. Their conclusion followed their own AI model, trained to detect cancer non-invasively (requiring no biopsy), being evaluated at only 65% sensitivity. While by no means an easy feat, that is a disappointing statistic from the US's 7th-best university. I, having experience in the industry and seeking a meaningful project to work on, felt compelled to see what I could do to move the needle. The result was a model evaluated at 62% using scikit-learn's classification report. The model is hosted on Hugging Face, and soon radiologists and patients will be able to use this model for free (and future improved versions of it) at https://lnkd.in/eTWCD2wu https://lnkd.in/eTx_sw9k
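A minimal sketch of the pipeline described in the Details list above (StandardScaler, then PCA with n_components=2, then KNN with n_neighbors=5). The feature array, labels, and split parameters here are placeholders standing in for the DBT image data, not the model's actual training code.

```python
# Hypothetical sketch of the HBF pipeline described above:
# StandardScaler -> PCA(n_components=2) -> KNeighborsClassifier(n_neighbors=5).
# X and y are synthetic stand-ins for image features and their labels
# (0=Normal, 1=Actionable, 2=Benign, 3=Cancer); loading DICOM data is out of scope.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report

rng = np.random.default_rng(0)
X = rng.normal(size=(1997, 64))    # placeholder features, 1997 samples
y = rng.integers(0, 4, size=1997)  # placeholder labels for the 4 classes

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, stratify=y, random_state=0
)

model = make_pipeline(
    StandardScaler(),                     # zero-mean / unit-variance scaling
    PCA(n_components=2),                  # project onto 2 principal components
    KNeighborsClassifier(n_neighbors=5),  # 5-nearest-neighbor vote
)
model.fit(X_train, y_train)

# Produces the same report format as the one shown in the card above.
print(classification_report(
    y_test, model.predict(X_test),
    target_names=["Normal", "Actionable", "Benign", "Cancer"],
))
```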
starnet/01-star21-06-22-01
starnet
"2024-06-22T23:43:27Z"
0
0
null
[ "any-to-any", "omega", "omegalabs", "bittensor", "agi", "license:mit", "region:us" ]
null
"2024-06-22T23:35:51Z"
--- license: mit tags: - any-to-any - omega - omegalabs - bittensor - agi --- This is an Any-to-Any model checkpoint for the OMEGA Labs x Bittensor Any-to-Any subnet. Check out the [git repo](https://github.com/omegalabsinc/omegalabs-anytoany-bittensor) and find OMEGA on X: [@omegalabsai](https://x.com/omegalabsai).
Amrak/FirstStepAi
Amrak
"2024-06-22T23:37:41Z"
0
0
null
[ "license:bigscience-openrail-m", "region:us" ]
null
"2024-06-22T23:37:41Z"
--- license: bigscience-openrail-m ---
SicariusSicariiStuff/LLAMA-3_8B_Unaligned_Alpha_RP_Soup_EXL2_5.0bpw
SicariusSicariiStuff
"2024-06-23T06:00:38Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "5-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T23:42:34Z"
--- license: apache-2.0 ---
SicariusSicariiStuff/LLAMA-3_8B_Unaligned_Alpha_RP_Soup_EXL2_6.0bpw
SicariusSicariiStuff
"2024-06-23T06:51:52Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "6-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T23:42:50Z"
--- license: apache-2.0 ---
SicariusSicariiStuff/LLAMA-3_8B_Unaligned_Alpha_RP_Soup_EXL2_7.0bpw
SicariusSicariiStuff
"2024-06-23T06:46:33Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "7-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T23:43:12Z"
--- license: apache-2.0 ---
SicariusSicariiStuff/LLAMA-3_8B_Unaligned_Alpha_RP_Soup_EXL2_8.0bpw
SicariusSicariiStuff
"2024-06-23T06:10:00Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "8-bit", "exl2", "region:us" ]
text-generation
"2024-06-22T23:43:32Z"
--- license: apache-2.0 ---
shuyuej/MedMistral-MoE-French
shuyuej
"2024-06-23T02:41:58Z"
0
0
null
[ "safetensors", "license:apache-2.0", "region:us" ]
null
"2024-06-22T23:43:41Z"
--- license: apache-2.0 ---
shuyuej/MedMistral-MoE-Multilingual
shuyuej
"2024-06-24T23:08:09Z"
0
0
null
[ "safetensors", "license:apache-2.0", "region:us" ]
null
"2024-06-22T23:44:19Z"
--- license: apache-2.0 ---
shuyuej/MedLLaMA3-70B-French
shuyuej
"2024-06-24T14:55:59Z"
0
0
null
[ "safetensors", "license:apache-2.0", "region:us" ]
null
"2024-06-22T23:49:38Z"
--- license: apache-2.0 ---