tokenizer:
  _component_: torchtune.models.mistral.mistral_tokenizer
  path: /tmp/Mistral-7B-Instruct-v0.2/tokenizer.model
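
# The instruct dataset loads the Hugging Face dataset
# prithviraj-maurya/legalbench-entire and formats each row with the Alpaca
# instruct template. column_map maps the dataset's "question" and "answer"
# columns onto the template's "input" and "output" fields, while "instruction"
# is used as-is; train_on_input: true means the loss is also computed on the
# prompt tokens, and samples are truncated to max_seq_len tokens.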
dataset:
  _component_: torchtune.datasets.instruct_dataset
  source: prithviraj-maurya/legalbench-entire
  template: AlpacaInstructTemplate
  column_map:
    instruction: instruction
    input: question
    output: answer
  max_seq_len: 256
  train_on_input: true
  split: train
seed: null
shuffle: true
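
# QLoRA setup: the frozen Mistral-7B base weights are quantized to 4 bits,
# while trainable low-rank adapters are attached to the q/k/v attention
# projections and the MLP layers (apply_lora_to_mlp: true) but not to the
# final output projection. The adapter update is scaled by
# lora_alpha / lora_rank (16 / 64 = 0.25 here).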
model:
  _component_: torchtune.models.mistral.qlora_mistral_7b
  lora_attn_modules:
    - q_proj
    - k_proj
    - v_proj
  apply_lora_to_mlp: true
  apply_lora_to_output: false
  lora_rank: 64
  lora_alpha: 16
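
# The HF checkpointer loads the sharded Hugging Face-format weights listed in
# checkpoint_files from checkpoint_dir and writes fine-tuned checkpoints back
# in the same format to output_dir. recipe_checkpoint and
# resume_from_checkpoint only matter when resuming an interrupted run.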
checkpointer:
  _component_: torchtune.utils.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Mistral-7B-Instruct-v0.2
  checkpoint_files:
    - pytorch_model-00001-of-00003.bin
    - pytorch_model-00002-of-00003.bin
    - pytorch_model-00003-of-00003.bin
  recipe_checkpoint: null
  output_dir: /tmp/Mistral-7B-Instruct-v0.2
  model_type: MISTRAL
resume_from_checkpoint: false
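
# Optimization: AdamW at a 2e-5 learning rate, a cosine schedule with a linear
# warmup over the first 100 steps, and a standard cross-entropy loss over
# next-token predictions.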
optimizer:
  _component_: torch.optim.AdamW
  lr: 2.0e-05

lr_scheduler:
  _component_: torchtune.modules.get_cosine_schedule_with_warmup
  num_warmup_steps: 100

loss:
  _component_: torch.nn.CrossEntropyLoss
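
# Training loop: with batch_size: 4 and gradient_accumulation_steps: 4, each
# optimizer step sees an effective batch of 16 samples. max_steps_per_epoch is
# an upper bound on steps per epoch; at 100000 it is unlikely to be the
# limiting factor, so each epoch should cover the full dataset.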
batch_size: 4
epochs: 10
max_steps_per_epoch: 100000
gradient_accumulation_steps: 4
compile: false
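
# Memory/precision: activation checkpointing recomputes activations during the
# backward pass, trading extra compute for lower memory use. dtype: fp32 keeps
# the non-quantized tensors (LoRA adapters and activations) in full precision.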
device: cuda
enable_activation_checkpointing: true
dtype: fp32
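
# Logging: metrics are sent to the Weights & Biases project "torchtune" every
# 10 steps; other recipe output (e.g. local log files) goes under output_dir.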
output_dir: /logs/mistral_7b_qlora_single_device_finetune
metric_logger:
  _component_: torchtune.utils.metric_logging.WandBLogger
  project: torchtune
log_every_n_steps: 10
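
# Profiling is disabled; when enabled, a trace is written to the profiler's
# output_dir.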
profiler:
  _component_: torchtune.utils.profiler
  enabled: false
  output_dir: /tmp/alpaca-llama2-finetune/torchtune_perf_tracing.json
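
# This config is typically launched with the torchtune CLI, e.g. (assuming the
# single-device LoRA/QLoRA recipe that ships with torchtune):
#   tune run lora_finetune_single_device --config path/to/this_config.yaml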