taddeusb90 committed on
Commit 6c075f2
1 Parent(s): 72a6437

Upload 3 files

adapter/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "cognitivecomputations/dolphin-2.9-llama3-8b",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "down_proj",
+     "o_proj",
+     "k_proj",
+     "v_proj",
+     "q_proj",
+     "gate_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
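For reference, a minimal sketch of attaching these LoRA weights to the base model with PEFT. It assumes transformers and peft are installed and that the adapter/ directory from this commit is available locally; the dtype and variable names are illustrative, not part of the upload.

# Load the base model and attach the r=8, alpha=16 LoRA adapter
# described by adapter/adapter_config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "cognitivecomputations/dolphin-2.9-llama3-8b"  # base_model_name_or_path above
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)

model = PeftModel.from_pretrained(base, "adapter")  # path to this commit's adapter/ folder
model.eval()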
adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53af49bddc4fb4270cd02d5bc8b79b6494eeb125631ab5819573e774ef1785a3
+ size 42002584
axolotl/training.yaml ADDED
@@ -0,0 +1,92 @@
+ base_model: cognitivecomputations/dolphin-2.9-llama3-8b
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: taddeusb90/finbro-v0.1.0
+     type: alpaca
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.01
+ output_dir: ./out/finbro-v0.1.0-dolphin-2.9-llama-3-8B-instruct-131k
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 8192
+ sample_packing: false
+ pad_to_sequence_len: true
+
+ lora_r: 8
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_modules:
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project: finbro-v0.1.0-dolphin-2.9-llama-3-8B-instruct
+ wandb_entity: sigmance
+ wandb_watch: "true"
+ wandb_name: finbro-v0.1.0-dolphin-2.9-llama-3-8B-instruct-131k
+ wandb_log_model: "true"
+
+ use_pose: true
+ pose_max_context_len: 131072
+
+ # lora_on_cpu:
+ overrides_of_model_config:
+   rope_theta: 500000.0
+   max_position_embeddings: 131072
+ rope_scaling:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: adamw_torch
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: true
+ early_stopping_patience: 50
+ resume_from_checkpoint: ./out/finbro-v0.1.0-dolphin-2.9-llama-3-8B-instruct-131k/checkpoint-5500
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 1
+ save_steps: 100
+ eval_steps: 100
+ # evals_per_epoch: 4
+ eval_table_size:
+ # saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+   - full_shard
+   - auto_wrap
+ fsdp_config:
+   fsdp_limit_all_gathers: true
+   fsdp_sync_module_states: true
+   fsdp_offload_params: true
+   fsdp_use_orig_params: false
+   fsdp_cpu_ram_efficient_loading: true
+   fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
+   fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
+   fsdp_state_dict_type: FULL_STATE_DICT
+   fsdp_sharding_strategy: FULL_SHARD
+ special_tokens:
+   pad_token: <|end_of_text|>
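As a rough sanity check before launching this config (Axolotl training is typically started with `accelerate launch -m axolotl.cli.train axolotl/training.yaml`), the YAML can be loaded and a few derived values printed. A sketch assuming PyYAML is installed; it only reads the file and does not start a run.

# Inspect the training config: effective batch size, QLoRA setup, and the
# PoSE context-extension target (sequence_len 8192 trained, 131072 target).
import yaml

with open("axolotl/training.yaml") as f:
    cfg = yaml.safe_load(f)

effective_batch = cfg["gradient_accumulation_steps"] * cfg["micro_batch_size"]
print("base model:      ", cfg["base_model"])
print("adapter type:    ", cfg["adapter"])              # qlora
print("trained seq len: ", cfg["sequence_len"])         # 8192
print("PoSE target ctx: ", cfg["pose_max_context_len"]) # 131072
print("effective batch: ", effective_batch, "(per device, before FSDP)")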