aengusl committed
Commit 9a299bf
1 Parent(s): 7823b7c

Model save

README.md CHANGED
@@ -1,8 +1,13 @@
 ---
 license: llama2
-base_model: meta-llama/Llama-2-7b-chat-hf
+library_name: peft
 tags:
+- trl
+- sft
 - generated_from_trainer
+datasets:
+- generator
+base_model: meta-llama/Llama-2-7b-chat-hf
 model-index:
 - name: llama2-7b-sft-lora
   results: []
@@ -13,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama2-7b-sft-lora
 
-This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the generator dataset.
 
 ## Model description
 
@@ -37,17 +42,16 @@ The following hyperparameters were used during training:
 - eval_batch_size: 8
 - seed: 4
 - distributed_type: multi-GPU
-- num_devices: 4
 - gradient_accumulation_steps: 4
-- total_train_batch_size: 64
-- total_eval_batch_size: 32
+- total_train_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - num_epochs: 1
 
 ### Framework versions
 
-- Transformers 4.35.0
-- Pytorch 2.1.0+cu121
-- Datasets 2.14.6
-- Tokenizers 0.14.1
+- PEFT 0.8.2
+- Transformers 4.37.2
+- Pytorch 2.2.0+cu121
+- Datasets 2.16.1
+- Tokenizers 0.15.1
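The card now declares `library_name: peft`, so the Hub will route loading through PEFT. A minimal, hedged sketch of using the adapter; the repo id `aengusl/llama2-7b-sft-lora` is inferred from the committer and model name, not stated in this commit:

```python
# Hypothetical usage sketch, not part of this commit. The repo id below is an
# assumption; substitute the actual Hub path of this adapter.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Loads the base model (meta-llama/Llama-2-7b-chat-hf, per base_model in the
# card) and applies the LoRA adapter weights on top of it.
model = AutoPeftModelForCausalLM.from_pretrained("aengusl/llama2-7b-sft-lora")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```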
adapter_config.json CHANGED
@@ -8,20 +8,24 @@
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
+  "loftq_config": {},
   "lora_alpha": 16,
   "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 64,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "down_proj",
     "k_proj",
     "q_proj",
     "up_proj",
-    "o_proj"
+    "o_proj",
+    "down_proj",
+    "v_proj"
   ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "use_rslora": false
 }
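The new keys (`loftq_config`, `megatron_config`, `megatron_core`, `use_rslora`) are fields that PEFT 0.8.x serializes by default; the effective LoRA settings are unchanged, and `target_modules` is only reordered. A sketch of the equivalent config in code, assuming PEFT >= 0.8:

```python
from peft import LoraConfig

# Mirrors the updated adapter_config.json above; the ordering of
# target_modules does not affect behavior.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["k_proj", "q_proj", "up_proj", "o_proj", "down_proj", "v_proj"],
    task_type="CAUSAL_LM",
    use_rslora=False,  # serialized explicitly by recent PEFT versions
)
```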
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:49df4de880eb6fb53f2feb9bff517e17dcecd6384c75dfb00a947b40744189bf
+oid sha256:7d1d8ee5e122aa5ba15194f89a4f794f9c3fe72e5c3ede8c7f35cf55983cee6c
 size 258001832
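Only the Git LFS pointer changes here: new adapter weights with the same byte size. A downloaded blob can be checked against the pointer's oid with a short sketch like this (local filename assumed):

```python
import hashlib

# Compute the sha256 of a downloaded LFS object and compare it against the
# "oid sha256:..." line of the pointer file above.
def lfs_sha256(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

assert lfs_sha256("adapter_model.safetensors") == (
    "7d1d8ee5e122aa5ba15194f89a4f794f9c3fe72e5c3ede8c7f35cf55983cee6c"
)
```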
runs/May16_15-03-43_d8bcbe5ca3fa/events.out.tfevents.1715871854.d8bcbe5ca3fa ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8251750c14fb524f9911c2ab11b5ba52becf762e264be86b21068d2ec2cf9ec0
+size 3668
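The added file is a Git LFS pointer to a TensorBoard event log for this run. One way to inspect it locally, assuming `tensorboard` is installed:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the run directory added in this commit.
ea = EventAccumulator("runs/May16_15-03-43_d8bcbe5ca3fa")
ea.Reload()
print(ea.Tags())  # e.g. scalar tags such as a training-loss series, if logged
```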
step_0/README.md CHANGED
@@ -18,6 +18,7 @@ base_model: meta-llama/Llama-2-7b-chat-hf
 
 
 - **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
 - **Shared by [optional]:** [More Information Needed]
 - **Model type:** [More Information Needed]
 - **Language(s) (NLP):** [More Information Needed]
@@ -76,7 +77,7 @@ Use the code below to get started with the model.
 
 ### Training Data
 
-<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
 
 [More Information Needed]
 
@@ -107,7 +108,7 @@ Use the code below to get started with the model.
 
 #### Testing Data
 
-<!-- This should link to a Data Card if possible. -->
+<!-- This should link to a Dataset Card if possible. -->
 
 [More Information Needed]
 
@@ -198,17 +199,6 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 [More Information Needed]
 
 
-## Training procedure
-
-
-### Framework versions
-
-
-- PEFT 0.6.1
-## Training procedure
-
-
 ### Framework versions
 
-
-- PEFT 0.6.1
+- PEFT 0.8.2
step_0/adapter_config.json CHANGED
@@ -8,20 +8,24 @@
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
+  "loftq_config": {},
   "lora_alpha": 16,
   "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 64,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "down_proj",
     "k_proj",
     "q_proj",
     "up_proj",
-    "o_proj"
+    "o_proj",
+    "down_proj",
+    "v_proj"
  ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "use_rslora": false
 }
step_0/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:49df4de880eb6fb53f2feb9bff517e17dcecd6384c75dfb00a947b40744189bf
+oid sha256:7d1d8ee5e122aa5ba15194f89a4f794f9c3fe72e5c3ede8c7f35cf55983cee6c
 size 258001832
step_0/tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
step_0/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bee563d624555c62fb5e5f06436c72f745ecd459b45f582abffe9ea1cc31e4de
-size 5560
+oid sha256:0972caf6dfbe417b15985b1fa88b70e41a7682ab205c10ce25d2feef0352bdc5
+size 5688
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bee563d624555c62fb5e5f06436c72f745ecd459b45f582abffe9ea1cc31e4de
-size 5560
+oid sha256:0972caf6dfbe417b15985b1fa88b70e41a7682ab205c10ce25d2feef0352bdc5
+size 5688
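`training_args.bin` is a pickled `transformers.TrainingArguments` object; the size change is consistent with the new fields added by Transformers 4.37. A hedged sketch of inspecting it, noting that unpickling generally needs a transformers version close to the one that wrote it:

```python
import torch

# training_args.bin is a pickled TrainingArguments; transformers must be
# importable (ideally 4.37.x, the version that wrote it) for unpickling.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.gradient_accumulation_steps, args.num_train_epochs)
```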