Model save
- README.md +7 -9
- adapter_config.json +4 -4
- all_results.json +3 -3
- runs/May01_02-05-36_COE-CS-sv003/events.out.tfevents.1714529631.COE-CS-sv003.579641.0 +3 -0
- runs/May01_02-22-04_COE-CS-sv003/events.out.tfevents.1714530140.COE-CS-sv003.581526.0 +3 -0
- runs/May01_02-31-24_COE-CS-sv003/events.out.tfevents.1714530755.COE-CS-sv003.581931.0 +3 -0
- tokenizer.json +63 -4
- train_results.json +3 -3
- trainer_state.json +8 -8
- training_args.bin +1 -1
README.md
CHANGED
@@ -2,13 +2,11 @@
 license: other
 library_name: peft
 tags:
-- alignment-handbook
 - trl
 - sft
+- alignment-handbook
 - generated_from_trainer
-base_model:
-datasets:
-- HuggingFaceH4/ultrachat_200k
+base_model: meta-llama/Meta-Llama-3-8B
 model-index:
 - name: llama3-poison-20p
   results: []
@@ -19,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama3-poison-20p
 
-This model is a fine-tuned version of [
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the None dataset.
 It achieves the following results on the evaluation set:
 - Loss: nan
 
@@ -41,14 +39,14 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size:
-- eval_batch_size:
+- train_batch_size: 4
+- eval_batch_size: 4
 - seed: 42
 - distributed_type: multi-GPU
 - num_devices: 4
 - gradient_accumulation_steps: 2
-- total_train_batch_size:
-- total_eval_batch_size:
+- total_train_batch_size: 32
+- total_eval_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
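For orientation, the hyperparameters listed in the README map onto a `transformers` `TrainingArguments` configuration roughly as sketched below. This is an illustrative assumption, not the repository's actual training script: the output directory and `bf16` flag are guesses, while the batch sizes, scheduler, warmup ratio, seed, and logging/save steps come from this commit's README and trainer_state.json.

```python
# Sketch only (assumed, not the repo's script): TrainingArguments matching the README.
# 4 GPUs x 4 per-device x 2 accumulation steps = 32 total train batch size.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="llama3-poison-20p",   # assumed output directory
    learning_rate=2e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=2,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    logging_steps=5,                  # from trainer_state.json
    save_steps=100,                   # from trainer_state.json
    bf16=True,                        # assumption; precision is not recorded in this diff
)
```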
adapter_config.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "
+  "base_model_name_or_path": "meta-llama/Meta-Llama-3-8B",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -20,12 +20,12 @@
   "revision": null,
   "target_modules": [
     "down_proj",
-    "
+    "v_proj",
     "up_proj",
-    "k_proj",
     "o_proj",
     "q_proj",
-    "
+    "gate_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
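The adapter_config.json only records the serialized LoRA settings; a minimal `peft` configuration consistent with the target-module list above might look like the sketch below. This is an assumption for illustration; the rank, alpha, and dropout are left at their defaults because they are not part of this diff.

```python
# Minimal sketch (assumption): a LoraConfig consistent with the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    base_model_name_or_path="meta-llama/Meta-Llama-3-8B",
    task_type="CAUSAL_LM",
    bias="none",
    target_modules=[
        "down_proj", "v_proj", "up_proj", "o_proj",
        "q_proj", "gate_proj", "k_proj",
    ],
    # r=..., lora_alpha=..., lora_dropout=...  # not visible in this diff; set per your recipe
)
```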
all_results.json
CHANGED
@@ -6,8 +6,8 @@
     "eval_samples_per_second": 17.614,
     "eval_steps_per_second": 0.557,
     "train_loss": 0.0,
-    "train_runtime":
+    "train_runtime": 1306.424,
     "train_samples": 21594,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 16.529,
+    "train_steps_per_second": 0.129
 }
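As a quick sanity check (illustrative, not part of the repository), the filled-in throughput numbers are internally consistent: samples per second is train_samples divided by train_runtime, and steps per second follows from the 169 optimizer steps recorded in trainer_state.json.

```python
# Consistency check of the reported metrics (illustrative, not from the repo).
train_samples, train_runtime, total_steps = 21594, 1306.424, 169

print(round(train_samples / train_runtime, 3))  # 16.529 -> matches "train_samples_per_second"
print(round(total_steps / train_runtime, 3))    # 0.129  -> matches "train_steps_per_second"
```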
runs/May01_02-05-36_COE-CS-sv003/events.out.tfevents.1714529631.COE-CS-sv003.579641.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b619cdae67445ec6e67e25a35c188c86750ecf11c0ba45f106091e52f662bbd
+size 4722
runs/May01_02-22-04_COE-CS-sv003/events.out.tfevents.1714530140.COE-CS-sv003.581526.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd55a444aa3719e6f17aa1466e18d019da4bdb13a0d607d9c5be662839240054
+size 4722
runs/May01_02-31-24_COE-CS-sv003/events.out.tfevents.1714530755.COE-CS-sv003.581931.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d18010f13e14943d4f4e2c2a382d194233db350c7ecb80d76c68916fbb85d7e
+size 8070
tokenizer.json
CHANGED
@@ -2334,10 +2334,69 @@
     ]
   },
   "post_processor": {
-    "type": "
-    "
-
-
+    "type": "Sequence",
+    "processors": [
+      {
+        "type": "ByteLevel",
+        "add_prefix_space": true,
+        "trim_offsets": false,
+        "use_regex": true
+      },
+      {
+        "type": "TemplateProcessing",
+        "single": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          }
+        ],
+        "pair": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          },
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 1
+            }
+          },
+          {
+            "Sequence": {
+              "id": "B",
+              "type_id": 1
+            }
+          }
+        ],
+        "special_tokens": {
+          "<|begin_of_text|>": {
+            "id": "<|begin_of_text|>",
+            "ids": [
+              128000
+            ],
+            "tokens": [
+              "<|begin_of_text|>"
+            ]
+          }
+        }
+      }
+    ]
   },
   "decoder": {
     "type": "ByteLevel",
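The new post_processor wraps the ByteLevel step in a Sequence together with a TemplateProcessing step that prepends <|begin_of_text|> (id 128000) to every encoded sequence. One way to observe the effect, assuming access to the gated base-model repo (whose tokenizer carries the same post-processor), is sketched below.

```python
# Illustrative check (assumes access to the gated meta-llama/Meta-Llama-3-8B repo):
# with the TemplateProcessing step, every encoding starts with <|begin_of_text|> (128000).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
ids = tok("Hello world").input_ids
print(ids[0] == 128000)  # True: BOS token added by the post-processor
```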
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
     "train_loss": 0.0,
-    "train_runtime":
+    "train_runtime": 1306.424,
     "train_samples": 21594,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 16.529,
+    "train_steps_per_second": 0.129
 }
trainer_state.json
CHANGED
@@ -249,19 +249,19 @@
     {
       "epoch": 1.0,
       "eval_loss": NaN,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_runtime": 199.4384,
+      "eval_samples_per_second": 11.583,
+      "eval_steps_per_second": 0.727,
       "step": 169
     },
     {
       "epoch": 1.0,
       "step": 169,
-      "total_flos":
+      "total_flos": 2113310740709376.0,
       "train_loss": 0.0,
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 0.
+      "train_runtime": 1306.424,
+      "train_samples_per_second": 16.529,
+      "train_steps_per_second": 0.129
     }
   ],
   "logging_steps": 5,
@@ -269,7 +269,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 100,
-  "total_flos":
+  "total_flos": 2113310740709376.0,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:99ab95922f275da33707695a6116de616d5a7d588d4bd30dea845a0da0a4e5dc
 size 6072