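# LoRA fine-tuning config for EleutherAI/pythia-1.4b-deduped in axolotl's
# config format; the base model is loaded in 8-bit to reduce GPU memory use.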
base_model: EleutherAI/pythia-1.4b-deduped
model_type: GPTNeoXForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
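# Instruction datasets to merge; `type` selects the prompt template used to
# parse each JSONL file (alpaca, sharegpt, gpteacher).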
datasets:
  - path: data/alpaca_data_gpt4.jsonl
    type: alpaca
  - path: data/vicuna_cleaned.jsonl
    type: sharegpt
  - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
    type: gpteacher
  - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
    type: gpteacher
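# Hold out 5% of the combined data for evaluation.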
val_set_size: 0.05
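# LoRA hyperparameters: rank 8 with alpha 32 gives an effective scaling of
# alpha / r = 4; only GPTNeoX's fused query_key_value projection is adapted.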
adapter: lora
lora_model_dir:
sequence_len: 2048
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
  - query_key_value
  # - xxx
lora_fan_in_fan_out: true # pythia/GPTNeoX LoRA specific
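# Weights & Biases tracking; blank fields fall back to wandb's defaults, and
# `checkpoint` uploads model checkpoints as wandb artifacts.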
wandb_project: pythia-1.4b-lora
wandb_watch:
wandb_run_name:
wandb_log_model: checkpoint
output_dir: ./lora-alpaca
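# Effective batch size of 48 with per-device micro-batches of 4; axolotl
# derives gradient accumulation as batch_size / micro_batch_size (48 / 4 = 12).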
batch_size: 48
micro_batch_size: 4
num_epochs: 5
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
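# Mixed precision: bf16 and tf32 both require an NVIDIA Ampere or newer GPU.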
bf16: true
tf32: true
resume_from_checkpoint:
local_rank:
deepspeed:
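# Example launch (hypothetical paths; the script location varies across
# axolotl versions):
#   accelerate launch scripts/finetune.py examples/pythia/lora.yml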