winglian committed
Commit 15f7910
Parent: d28ba2e

llama-3 examples (#1537)

examples/llama-3/README.md ADDED
@@ -0,0 +1,13 @@
+ # Llama-3
+
+ https://llama.meta.com/llama3/
+
+ [8B Base Model](https://huggingface.co/meta-llama/Meta-Llama-3-8B)
+ - [Full Fine Tune](./fft-8b.yaml)
+   - Single GPU @ 48GB VRAM
+ - [LoRA](./lora-8b.yml)
+   - Single GPU @ 11GB VRAM
+
+ [70B Base Model](https://huggingface.co/meta-llama/Meta-Llama-3-70B)
+ - [QLORA+FSDP](./qlora-fsdp-70b.yaml)
+   - Dual GPU @ 21GB VRAM
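A quick sanity check on the VRAM figures above (not part of the PR). A back-of-envelope sketch in Python for the 8B full fine-tune, assuming bf16 weights and gradients, roughly two one-byte states per parameter for `paged_adamw_8bit`, and activations mostly reclaimed by gradient checkpointing; the parameter count and byte sizes are the only inputs.

```python
# Back-of-envelope for the "Single GPU @ 48GB" full fine-tune figure.
# Assumptions (ours, not the PR's): bf16 weights/grads, ~2 one-byte Adam
# states per param with paged_adamw_8bit, activations reclaimed by
# gradient checkpointing. Paged optimizer states can spill to CPU, so
# this is an upper-end estimate.
params = 8.03e9  # Meta-Llama-3-8B parameter count

weights_gb = params * 2 / 2**30  # bf16 weights: 2 bytes/param
grads_gb = params * 2 / 2**30    # bf16 gradients: 2 bytes/param
optim_gb = params * 2 / 2**30    # 8-bit Adam: ~2 states x 1 byte/param

print(f"~{weights_gb + grads_gb + optim_gb:.0f} GB before activations")  # ~45 GB
```

which lands close to the quoted 48GB once activations and framework overhead are added.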
examples/llama-3/fft-8b.yaml ADDED
@@ -0,0 +1,58 @@
+ base_model: meta-llama/Meta-Llama-3-8B
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: tatsu-lab/alpaca
+     type: alpaca
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.05
+ output_dir: ./out
+
+ sequence_len: 8192
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 8
+ micro_batch_size: 1
+ num_epochs: 1
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ learning_rate: 2e-5
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 100
+ evals_per_epoch: 2
+ eval_table_size:
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   pad_token: <|end_of_text|>
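For orientation, here is the batch geometry this config implies on the README's single-GPU setup; a small sketch, where the per-step token count is an upper bound since `sample_packing` fits several short examples into each 8192-token sequence:

```python
# Batch geometry implied by fft-8b.yaml (single GPU, per the README).
micro_batch_size = 1
gradient_accumulation_steps = 8
sequence_len = 8192
num_gpus = 1  # assumption: the README's single-GPU setup

seqs_per_step = micro_batch_size * gradient_accumulation_steps * num_gpus
tokens_per_step = seqs_per_step * sequence_len  # upper bound with packing
print(seqs_per_step, tokens_per_step)  # 8 sequences, up to 65,536 tokens/step
```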
examples/llama-3/lora-8b.yml ADDED
@@ -0,0 +1,67 @@
+ base_model: meta-llama/Meta-Llama-3-8B
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: true
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: mhenrichsen/alpaca_2k_test
+     type: alpaca
+ dataset_prepared_path:
+ val_set_size: 0.05
+ output_dir: ./lora-out
+
+ sequence_len: 4096
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ adapter: lora
+ lora_model_dir:
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 2
+ num_epochs: 4
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+ s2_attention:
+
+ warmup_steps: 10
+ evals_per_epoch: 4
+ eval_table_size:
+ eval_max_new_tokens: 128
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   pad_token: <|end_of_text|>
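Roughly what the `adapter` section above corresponds to in plain PEFT, plus the trainable-parameter count it implies; a sketch, not axolotl's code path, and `lora_target_linear: true` is approximated here by listing Llama's linear projections explicitly:

```python
# Approximate PEFT equivalent of the LoRA settings in lora-8b.yml
# (a sketch; axolotl's exact wiring may differ).
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)

# Trainable-param estimate for Meta-Llama-3-8B: hidden 4096, MLP 14336,
# 32 layers, 8 KV heads (so the k/v projections are 4096 -> 1024).
# LoRA adds r * (d_in + d_out) params per targeted linear layer.
r = 32
per_layer = sum(r * (d_in + d_out) for d_in, d_out in [
    (4096, 4096), (4096, 1024), (4096, 1024), (4096, 4096),  # q, k, v, o
    (4096, 14336), (4096, 14336), (14336, 4096),             # gate, up, down
])
print(f"~{32 * per_layer / 1e6:.1f}M trainable LoRA params")  # ~83.9M
```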
examples/llama-3/qlora-fsdp-70b.yaml ADDED
@@ -0,0 +1,80 @@
+ base_model: casperhansen/llama-3-70b-fp16
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: tatsu-lab/alpaca
+     type: alpaca
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.05
+ output_dir: ./out/qlora-llama3-70b
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 512
+ sample_packing: false
+ pad_to_sequence_len: true
+
+ lora_r: 8
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_modules:
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: adamw_torch
+ lr_scheduler: cosine
+ learning_rate: 0.00001
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch: 4
+ eval_table_size:
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+   - full_shard
+   - auto_wrap
+ fsdp_config:
+   fsdp_limit_all_gathers: true
+   fsdp_sync_module_states: true
+   fsdp_offload_params: true
+   fsdp_use_orig_params: false
+   fsdp_cpu_ram_efficient_loading: true
+   fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
+   fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
+   fsdp_state_dict_type: FULL_STATE_DICT
+   fsdp_sharding_strategy: FULL_SHARD
+ special_tokens:
+   pad_token: <|end_of_text|>
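The `load_in_4bit: true` plus `fsdp`/`fsdp_config` combination above is the FSDP+QLoRA recipe. At the transformers level it corresponds roughly to the load below (a sketch; axolotl's exact defaults may differ). Storing the quantized weights in bf16 via `bnb_4bit_quant_storage` is what lets FSDP shard them alongside the rest of the model.

```python
# Rough transformers-level equivalent of `load_in_4bit: true` under FSDP
# (a sketch; not axolotl's exact code path).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",          # assumption: NF4, the QLoRA default
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_storage=torch.bfloat16,  # lets FSDP shard the 4-bit weights
)
model = AutoModelForCausalLM.from_pretrained(
    "casperhansen/llama-3-70b-fp16",
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
)
```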