|
--- |
|
datasets: |
|
- yahma/alpaca-cleaned
|
- lksy/ru_instruct_gpt4 |
|
language: |
|
- ru |
|
pipeline_tag: text-generation
|
inference: false |
|
--- |
|
|
|
|
|
Based on [LLaMA 13B](https://huggingface.co/yahma/llama-13b-hf). |
|
|
|
Trained with LoRA adapters applied to 4 target modules (`q_proj`, `v_proj`, `k_proj`, `o_proj`).
|
|
|
Parameters: |
|
|
|
``` |
|
{ |
|
"base_model_name_or_path": "./llama-30b-hf", |
|
"bias": "none", |
|
"enable_lora": null, |
|
"fan_in_fan_out": false, |
|
"inference_mode": true, |
|
"lora_alpha": 16, |
|
"lora_dropout": 0.05, |
|
"merge_weights": false, |
|
"modules_to_save": null, |
|
"peft_type": "LORA", |
|
"r": 16, |
|
"target_modules": [ |
|
"q_proj", |
|
"v_proj", |
|
"k_proj", |
|
"o_proj" |
|
], |
|
"task_type": "CAUSAL_LM" |
|
} |
|
``` |
|
Cutoff length was set to 512 tokens.
|
|
|
|
|
Prompt template:

```
{
|
"description": "A shorter template to experiment with.", |
|
"prompt_input": "### Задание:\n{instruction}\n\n### Вход:\n{input}\n\n### Ответ:\n", |
|
"prompt_no_input": "### Задание:\n{instruction}\n\n### Ответ:\n", |
|
"response_split": "### Ответ:" |
|
} |
|
``` |
|
|
|
[WandB report](https://wandb.ai/lksy/huggingface/runs/oj1ezptd) |
|
|
|
Epochs: 4 |
|
|
|
Loss: 0.853 |
|
|