# Model arguments
model_name_or_path: sanchit-gandhi/Mistral-1.5B-Instruct-v0.2
teacher_model_name_or_path: mistralai/Mistral-7B-Instruct-v0.2
tokenizer_name: mistralai/Mistral-7B-Instruct-v0.2
dtype: bfloat16
load_teacher_in_4bit: true
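# Note (assumption about the script's semantics): the student (model_name_or_path) is
# distilled from the frozen teacher (teacher_model_name_or_path); load_teacher_in_4bit
# quantises the teacher (e.g. via bitsandbytes) to cut memory, while training runs in bfloat16.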
# Data arguments
train_dataset_name: HuggingFaceTB/cosmopedia
train_dataset_config_name:
- auto_math_text
- khanacademy
- openstax
- stanford
- stories
- web_samples_v1
- web_samples_v2
- wikihow
train_split_name: train[1000:]
eval_split_name: train[:1000]
max_steps: 200000
max_train_samples: 20000000
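# Note: the eight Cosmopedia configs listed above are loaded and combined into one corpus;
# the first 1,000 examples of the train split are held out for evaluation (eval_split_name)
# and the remainder used for training, capped at 20M samples or 200k steps.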
# Training arguments
do_train: true
do_eval: true
per_device_eval_batch_size: 8
per_device_train_batch_size: 8
learning_rate: 0.0001
warmup_steps: 500
gradient_checkpointing: true
dataloader_num_workers: 4
preprocessing_num_workers: 32
ddp_timeout: 7200
save_strategy: steps
save_steps: 5000
evaluation_strategy: steps
eval_steps: 5000
logging_steps: 25
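# Note: checkpoints are saved and evaluation is run every 5,000 steps, with losses logged
# every 25 steps; save_total_limit: 1 (below) keeps only the most recent checkpoint on disk.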
output_router_logits: true
report_to: all
output_dir: ./
overwrite_output_dir: false
save_total_limit: 1
wandb_project: distil-mistral
push_to_hub: true
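# Usage sketch (assumption: the accompanying distillation script accepts a YAML config as
# its single positional argument, analogous to the distil-whisper training recipes; the
# script and config filenames below are placeholders):
#
#   accelerate launch run_distillation.py config_mistral_distillation.yaml
#
# report_to: all logs to every installed tracker (including the wandb project above), and
# push_to_hub: true uploads checkpoints from output_dir to the Hugging Face Hub.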