HenryJJ committed on
Commit
0852336
1 Parent(s): ed2e244
Files changed (1)
  1. config/llama3-cqia.yml +82 -0
config/llama3-cqia.yml ADDED
@@ -0,0 +1,82 @@
+ # Llama-3-8B
+ base_model: NousResearch/Meta-Llama-3-8B
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: m-a-p/COIG-CQIA
+     data_files: COIG-CQIA-full.jsonl
+     type: alpaca:chatml
+ chat_template: chatml
+ dataset_prepared_path: cqia # local cache dir for the prepared (tokenized) dataset
+ val_set_size: 0.05
+ output_dir: ./llama3-cqia-out
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 5120
+ sample_packing: true
+ pad_to_sequence_len: true
+ eval_sample_packing: false
+
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_modules:
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_modules_to_save:
+   - embed_tokens
+   - lm_head
+
+ wandb_project: llama3-cqia
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 2
+ micro_batch_size: 1
+ num_epochs: 2
+ optimizer: paged_adamw_32bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 2
+ xformers_attention:
+ flash_attention: true
+
+
+ warmup_steps: 10
+ evals_per_epoch: 4
+ eval_table_size:
+ eval_table_max_new_tokens:
+ saves_per_epoch: 1
+ debug: true
+ deepspeed:
+ weight_decay: 0.05
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   eos_token: "<|im_end|>"
+   pad_token: "<|end_of_text|>"
+ tokens:
+   - "<|im_start|>"
+   - "<|im_end|>"