Commit 0f7ec91 committed by aengusl
1 Parent(s): a263a1e

Model save

README.md CHANGED
@@ -1,8 +1,13 @@
  ---
  license: llama2
- base_model: meta-llama/Llama-2-7b-chat-hf
+ library_name: peft
  tags:
+ - trl
+ - sft
  - generated_from_trainer
+ datasets:
+ - generator
+ base_model: meta-llama/Llama-2-7b-chat-hf
  model-index:
  - name: llama2-7b-sft-lora
    results: []
@@ -13,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->

  # llama2-7b-sft-lora

- This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on an unknown dataset.
+ This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the generator dataset.

  ## Model description

@@ -47,7 +52,8 @@ The following hyperparameters were used during training:

  ### Framework versions

- - Transformers 4.35.0
- - Pytorch 2.1.0+cu121
- - Datasets 2.14.6
- - Tokenizers 0.14.1
+ - PEFT 0.8.2
+ - Transformers 4.37.2
+ - Pytorch 2.2.0+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.1
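With the card now declaring `library_name: peft`, this checkpoint is a LoRA adapter that loads on top of its base model rather than as a standalone model. A minimal loading sketch — the hub id `aengusl/llama2-7b-sft-lora` is an assumption inferred from the author and model name, not stated in this diff:

```python
# Minimal sketch: attach the LoRA adapter (adapter_model.safetensors) to the
# base model. Hub id "aengusl/llama2-7b-sft-lora" is hypothetical.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

model = PeftModel.from_pretrained(base, "aengusl/llama2-7b-sft-lora")
model.eval()
```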
adapter_config.json CHANGED
@@ -8,20 +8,24 @@
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
+ "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
  "q_proj",
+ "down_proj",
+ "up_proj",
  "k_proj",
  "v_proj",
- "down_proj",
  "o_proj"
  ],
- "task_type": "CAUSAL_LM"
+ "task_type": "CAUSAL_LM",
+ "use_rslora": false
  }
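The updated config serializes fields introduced in newer PEFT releases (`loftq_config`, `megatron_config`, `use_rslora`); the reordering of `target_modules` is cosmetic. A sketch of the equivalent `LoraConfig` in Python, mirroring the JSON above:

```python
# Sketch of the LoraConfig corresponding to the updated adapter_config.json;
# fields not listed (loftq_config, megatron_config, ...) keep PEFT defaults.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                    # LoRA rank
    lora_alpha=16,           # scaling factor
    lora_dropout=0.1,
    target_modules=["q_proj", "down_proj", "up_proj",
                    "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
    use_rslora=False,        # field serialized by PEFT 0.8.x
)
```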
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49df4de880eb6fb53f2feb9bff517e17dcecd6384c75dfb00a947b40744189bf
+ oid sha256:7d1d8ee5e122aa5ba15194f89a4f794f9c3fe72e5c3ede8c7f35cf55983cee6c
  size 258001832
runs/May18_14-29-11_d8da557e34ee/events.out.tfevents.1716042579.d8da557e34ee.10821.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e74276703e85f640833475048a7ad0e68c08cf403eebfe0bf32e167ad855697
+ size 4477
step_0/README.md CHANGED
@@ -18,6 +18,7 @@ base_model: meta-llama/Llama-2-7b-chat-hf


  - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
  - **Shared by [optional]:** [More Information Needed]
  - **Model type:** [More Information Needed]
  - **Language(s) (NLP):** [More Information Needed]
@@ -76,7 +77,7 @@ Use the code below to get started with the model.

  ### Training Data

- <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

  [More Information Needed]

@@ -107,7 +108,7 @@ Use the code below to get started with the model.

  #### Testing Data

- <!-- This should link to a Data Card if possible. -->
+ <!-- This should link to a Dataset Card if possible. -->

  [More Information Needed]

@@ -198,143 +199,6 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
  [More Information Needed]


- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
  ### Framework versions

-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
- ## Training procedure
-
-
- ### Framework versions
-
-
- - PEFT 0.6.1
+ - PEFT 0.8.2
step_0/adapter_config.json CHANGED
@@ -8,20 +8,24 @@
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
+ "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
  "q_proj",
+ "down_proj",
+ "up_proj",
  "k_proj",
  "v_proj",
- "down_proj",
  "o_proj"
  ],
- "task_type": "CAUSAL_LM"
+ "task_type": "CAUSAL_LM",
+ "use_rslora": false
  }
step_0/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49df4de880eb6fb53f2feb9bff517e17dcecd6384c75dfb00a947b40744189bf
+ oid sha256:7d1d8ee5e122aa5ba15194f89a4f794f9c3fe72e5c3ede8c7f35cf55983cee6c
  size 258001832
step_0/tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
  {
+ "add_bos_token": true,
+ "add_eos_token": false,
  "added_tokens_decoder": {
  "0": {
  "content": "<unk>",
step_0/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef2801cfa9b360fd548be6a18ff68edf9bf7d495312610ac914dc633549e498a
- size 5560
+ oid sha256:66fc9aef4095adcf1bc2468cd68db7d22e914d3e301405fc251afdb3b7a001d4
+ size 5688
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
  {
+ "add_bos_token": true,
+ "add_eos_token": false,
  "added_tokens_decoder": {
  "0": {
  "content": "<unk>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef2801cfa9b360fd548be6a18ff68edf9bf7d495312610ac914dc633549e498a
- size 5560
+ oid sha256:66fc9aef4095adcf1bc2468cd68db7d22e914d3e301405fc251afdb3b7a001d4
+ size 5688
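The size change (5560 → 5688 bytes) is consistent with `training_args.bin` being a pickled `TrainingArguments` object that gained fields between Transformers 4.35 and 4.37. A hedged sketch for inspecting it locally, assuming a compatible Transformers install so the pickle can resolve its classes:

```python
# Sketch: unpickle the saved TrainingArguments. weights_only=False is needed
# on newer torch versions, since the file is a full pickled Python object.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)                        # expected: TrainingArguments
print(args.learning_rate, args.num_train_epochs)  # example fields
```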