taddeusb90 committed
Commit 6d4a8e8
Parent(s): 757c8a7
Upload 3 files
adapter/adapter_config.json
CHANGED
@@ -10,22 +10,22 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha":
+  "lora_alpha": 16,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r":
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
-    "o_proj",
     "up_proj",
-    "gate_proj",
     "k_proj",
+    "o_proj",
+    "down_proj",
     "q_proj",
+    "gate_proj",
     "v_proj"
   ],
   "task_type": "CAUSAL_LM",
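The adapter config now pins the LoRA rank to r=8 with lora_alpha=16 (an effective scaling factor of alpha/r = 2) across seven target projections. For reference, a minimal sketch of loading this adapter with the PEFT library; the base-model hub id and the local "./adapter" path are assumptions, since the commit does not pin either:

# Minimal sketch: attach the LoRA adapter from this commit to its base model.
# Assumed: base id "meta-llama/Meta-Llama-3-8B-Instruct" and adapter path
# "./adapter" (mirroring this repo's folder); neither is stated in the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Meta-Llama-3-8B-Instruct"
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(base_id)

# adapter_config.json (r=8, lora_alpha=16, 7 target modules) is read from here.
model = PeftModel.from_pretrained(model, "./adapter")
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights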
adapter/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:141e7b3cfb0c6232028bb14bab844dc0b18de1d556f32015fc2f281f39561a72
+size 42002584
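This entry is a Git LFS pointer rather than the tensor data itself: it records only the object's sha256 and byte size. The ~42 MB size is consistent with an r=8 adapter over all seven linear projections of an 8B Llama model stored in 16-bit precision (roughly 21M LoRA parameters). A quick integrity check of a downloaded file against this pointer, with the local path assumed, might look like:

# Verify a downloaded adapter_model.safetensors against the LFS pointer above.
# The local path is an assumption; oid and size are taken from this commit.
import hashlib

EXPECTED_OID = "141e7b3cfb0c6232028bb14bab844dc0b18de1d556f32015fc2f281f39561a72"
EXPECTED_SIZE = 42002584

with open("adapter/adapter_model.safetensors", "rb") as f:
    data = f.read()

assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pointer matches: OK")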
axolotl/training.yaml
CHANGED
@@ -11,7 +11,7 @@ datasets:
     type: alpaca
 dataset_prepared_path: last_run_prepared
 val_set_size: 0.01
-output_dir: ./out/finbro-v0.1.0-llama-3-8B-instruct-
+output_dir: ./out/finbro-v0.1.0-llama-3-8B-instruct-1m

 adapter: qlora
 lora_model_dir:
@@ -20,22 +20,23 @@ sequence_len: 8192
 sample_packing: false
 pad_to_sequence_len: true

-lora_r:
-lora_alpha:
+lora_r: 8
+lora_alpha: 16
 lora_dropout: 0.05
 lora_target_modules:
 lora_target_linear: true
 lora_fan_in_fan_out:

-wandb_project: finbro-v0.1.0-llama-3-8B-instruct-
+wandb_project: finbro-v0.1.0-llama-3-8B-instruct-131k
 wandb_entity: sigmance
 wandb_watch: "true"
-wandb_name: finbro-v0.1.0-llama-3-8B-instruct-
+wandb_name: finbro-v0.1.0-llama-3-8B-instruct-1m
 wandb_log_model: "true"

 use_pose: true
 pose_max_context_len: 1048576

+# lora_on_cpu:
 overrides_of_model_config:
   rope_theta: 500000.0
   max_position_embeddings: 1048576
@@ -58,7 +59,7 @@ gradient_checkpointing: true
 gradient_checkpointing_kwargs:
   use_reentrant: true
 early_stopping_patience: 50
-resume_from_checkpoint: ./out/finbro-v0.1.0-llama-3-8B-instruct-
+resume_from_checkpoint: ./out/finbro-v0.1.0-llama-3-8B-instruct-131k/checkpoint-3500
 local_rank:
 logging_steps: 1
 xformers_attention:
@@ -72,7 +73,7 @@ eval_steps: 100
 eval_table_size:
 # saves_per_epoch: 1
 debug:
-deepspeed:
+deepspeed:
 weight_decay: 0.0
 fsdp:
   - full_shard
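The training config fills in the matching QLoRA hyperparameters (lora_r: 8, lora_alpha: 16), moves the run to a 1m output dir while resuming from checkpoint-3500 of the 131k run, and keeps the PoSE long-context settings. With axolotl installed, a config like this is typically launched via accelerate launch -m axolotl.cli.train axolotl/training.yaml. As a rough sketch of what the overrides_of_model_config block amounts to on the transformers side — the base-model id is an assumption; rope_theta and max_position_embeddings are the YAML's values:

# Sketch: the model-config overrides applied by the YAML's
# overrides_of_model_config block (base-model id is assumed).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
config.rope_theta = 500000.0                # RoPE base frequency from the YAML
config.max_position_embeddings = 1048576    # 1M-token target context for PoSE
print(config.rope_theta, config.max_position_embeddings)

PoSE (positional skip-wise training) manipulates position ids so the run can target this 1M-token window while actually training at sequence_len: 8192, which is why pose_max_context_len and max_position_embeddings sit far above the trained sequence length.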