fangzhaoz committed
Commit c920b29
1 Parent(s): 19194ba

End of training

README.md CHANGED
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama3-orchamath-lora
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the None dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on an unknown dataset.
 
 ## Model description
 
@@ -33,10 +33,10 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 1e-05
+- learning_rate: 0.0001
 - train_batch_size: 8
 - eval_batch_size: 8
-- seed: 0
+- seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant
 - num_epochs: 1
@@ -47,8 +47,8 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.9.0
-- Transformers 4.39.3
-- Pytorch 2.2.2
+- PEFT 0.10.0
+- Transformers 4.38.2
+- Pytorch 2.2.1
 - Datasets 2.18.0
 - Tokenizers 0.15.2
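The hyperparameter list above maps one-to-one onto `transformers` TrainingArguments. Below is a minimal sketch of that mapping, assuming the run was driven by the standard `Trainer`; the `output_dir` is hypothetical and the actual training script is not part of this commit.

```python
# Sketch only: the README's post-commit hyperparameters expressed as TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="llama3-orchamath-lora",  # hypothetical output path
    learning_rate=1e-4,                  # learning_rate: 0.0001
    per_device_train_batch_size=8,       # train_batch_size: 8
    per_device_eval_batch_size=8,        # eval_batch_size: 8
    seed=42,                             # seed: 42
    lr_scheduler_type="constant",        # lr_scheduler_type: constant
    num_train_epochs=1,                  # num_epochs: 1
    adam_beta1=0.9,                      # Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,                   # epsilon=1e-08
)
```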
adapter_config.json CHANGED
@@ -6,27 +6,28 @@
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
+  "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 128,
-  "lora_dropout": 0.05,
+  "lora_alpha": 16,
+  "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 64,
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "spectral_bottom": false,
   "spectral_top": false,
   "target_modules": [
-    "v_proj",
-    "up_proj",
     "down_proj",
+    "up_proj",
     "k_proj",
-    "gate_proj",
-    "q_proj"
+    "q_proj",
+    "v_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2fd3843c76462e08f5bddcf5d18944c89311905fc56835e0a19e8da0643e546b
-size 302041912
+oid sha256:fb7626b696273f67961180cfbef24e1cce6771be90e8042fe12a84e07f6d10bf
+size 37799672
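The new adapter weights file is roughly 38 MB, down from about 302 MB, consistent with the rank reduction from 64 to 8. A minimal usage sketch for loading it on top of the base model; the repo id `fangzhaoz/llama3-orchamath-lora` is assumed from the committer and model name, not stated in the commit.

```python
# Sketch only: load the committed LoRA adapter onto the base model with peft.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
model = PeftModel.from_pretrained(base, "fangzhaoz/llama3-orchamath-lora")  # assumed repo id
```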
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:55f600243213199b42cdde022d8f24d67cb44b4eadf462c5e270160d256ff575
+oid sha256:274f719e7b1e68343980c1ee3245161bddb6bd6d0fb800240c2adeaa900af876
 size 4920