lewtun committed
Commit 4e62bce
1 Parent(s): e854781

Model save

README.md CHANGED
@@ -2,13 +2,9 @@
 license: apache-2.0
 library_name: peft
 tags:
-- alignment-handbook
-- generated_from_trainer
 - trl
 - dpo
 - generated_from_trainer
-datasets:
-- HuggingFaceH4/ultrafeedback_binarized
 base_model: mistralai/Mistral-7B-v0.1
 model-index:
 - name: zephyr-7b-dpo-qlora
@@ -20,17 +16,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-dpo-qlora
 
-This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-qlora](https://huggingface.co/alignment-handbook/zephyr-7b-sft-qlora) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.5325
-- Rewards/chosen: -1.2325
-- Rewards/rejected: -2.0565
-- Rewards/accuracies: 0.7656
-- Rewards/margins: 0.8240
-- Logps/rejected: -457.4398
-- Logps/chosen: -373.4022
-- Logits/rejected: 0.7596
-- Logits/chosen: 0.5001
+- Loss: 0.5473
+- Rewards/chosen: -0.8609
+- Rewards/rejected: -1.5251
+- Rewards/accuracies: 0.7422
+- Rewards/margins: 0.6641
+- Logps/rejected: -404.3018
+- Logps/chosen: -336.2481
+- Logits/rejected: 0.0706
+- Logits/chosen: -0.1471
 
 ## Model description
 
@@ -55,7 +51,8 @@ The following hyperparameters were used during training:
 - seed: 42
 - distributed_type: multi-GPU
 - num_devices: 8
-- total_train_batch_size: 32
+- gradient_accumulation_steps: 2
+- total_train_batch_size: 64
 - total_eval_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
@@ -66,25 +63,15 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 0.6916 | 0.05 | 100 | 0.6912 | 0.0059 | 0.0019 | 0.6484 | 0.0041 | -251.6075 | -249.5596 | -2.2040 | -2.2621 |
-| 0.655 | 0.1 | 200 | 0.6498 | -0.0559 | -0.1762 | 0.7070 | 0.1203 | -269.4106 | -255.7421 | -2.1011 | -2.1614 |
-| 0.6342 | 0.16 | 300 | 0.6146 | -0.3407 | -0.6269 | 0.7031 | 0.2862 | -314.4839 | -284.2224 | -1.9037 | -1.9793 |
-| 0.6121 | 0.21 | 400 | 0.5946 | -0.4657 | -0.8916 | 0.7031 | 0.4259 | -340.9551 | -296.7203 | -1.8717 | -1.9543 |
-| 0.5973 | 0.26 | 500 | 0.5938 | -0.3681 | -0.7766 | 0.7305 | 0.4085 | -329.4522 | -286.9666 | -1.8440 | -1.9282 |
-| 0.5473 | 0.31 | 600 | 0.5774 | -0.6893 | -1.2264 | 0.7344 | 0.5371 | -374.4341 | -319.0812 | -1.6815 | -1.7726 |
-| 0.5792 | 0.37 | 700 | 0.5709 | -0.6635 | -1.2100 | 0.7578 | 0.5465 | -372.7989 | -316.5072 | -1.4783 | -1.5775 |
-| 0.5194 | 0.42 | 800 | 0.5590 | -1.0208 | -1.6453 | 0.7461 | 0.6245 | -416.3269 | -352.2357 | -0.3791 | -0.5486 |
-| 0.5367 | 0.47 | 900 | 0.5492 | -1.1477 | -1.8521 | 0.7266 | 0.7044 | -437.0040 | -364.9276 | -0.0908 | -0.2899 |
-| 0.5575 | 0.52 | 1000 | 0.5450 | -1.1704 | -1.9048 | 0.7344 | 0.7344 | -442.2755 | -367.1964 | 0.2761 | 0.0498 |
-| 0.5507 | 0.58 | 1100 | 0.5429 | -1.1040 | -1.8671 | 0.7422 | 0.7631 | -438.5026 | -360.5551 | 0.5339 | 0.2877 |
-| 0.5305 | 0.63 | 1200 | 0.5366 | -1.1557 | -1.9243 | 0.7578 | 0.7686 | -444.2217 | -365.7241 | 0.7350 | 0.4755 |
-| 0.5171 | 0.68 | 1300 | 0.5304 | -1.3741 | -2.1678 | 0.7656 | 0.7937 | -468.5735 | -387.5681 | 0.7686 | 0.5029 |
-| 0.4875 | 0.73 | 1400 | 0.5321 | -1.3228 | -2.1513 | 0.7578 | 0.8285 | -466.9267 | -382.4329 | 0.8566 | 0.5926 |
-| 0.5216 | 0.78 | 1500 | 0.5326 | -1.2006 | -2.0034 | 0.7617 | 0.8028 | -452.1298 | -370.2103 | 0.7189 | 0.4630 |
-| 0.4894 | 0.84 | 1600 | 0.5327 | -1.2300 | -2.0556 | 0.7656 | 0.8256 | -457.3565 | -373.1585 | 0.7405 | 0.4828 |
-| 0.5179 | 0.89 | 1700 | 0.5326 | -1.2313 | -2.0558 | 0.7656 | 0.8245 | -457.3720 | -373.2860 | 0.7604 | 0.5012 |
-| 0.5534 | 0.94 | 1800 | 0.5325 | -1.2309 | -2.0558 | 0.7656 | 0.8249 | -457.3779 | -373.2437 | 0.7550 | 0.4957 |
-| 0.5539 | 0.99 | 1900 | 0.5325 | -1.2325 | -2.0565 | 0.7656 | 0.8240 | -457.4398 | -373.4022 | 0.7596 | 0.5001 |
+| 0.6812 | 0.1 | 100 | 0.6787 | 0.0452 | 0.0120 | 0.6992 | 0.0332 | -250.5929 | -245.6322 | -2.1942 | -2.2517 |
+| 0.6066 | 0.21 | 200 | 0.6151 | -0.2303 | -0.5020 | 0.6992 | 0.2717 | -301.9975 | -273.1855 | -1.9906 | -2.0610 |
+| 0.5711 | 0.31 | 300 | 0.5927 | -0.4441 | -0.8513 | 0.7188 | 0.4072 | -336.9228 | -294.5666 | -1.9417 | -2.0223 |
+| 0.557 | 0.42 | 400 | 0.5817 | -0.5958 | -1.0732 | 0.7227 | 0.4773 | -359.1117 | -309.7378 | -1.7434 | -1.8364 |
+| 0.5703 | 0.52 | 500 | 0.5679 | -0.7215 | -1.2405 | 0.7266 | 0.5189 | -375.8402 | -322.3068 | -0.8467 | -0.9967 |
+| 0.5498 | 0.63 | 600 | 0.5582 | -0.7003 | -1.2848 | 0.7578 | 0.5845 | -380.2699 | -320.1794 | -0.2510 | -0.4463 |
+| 0.5279 | 0.73 | 700 | 0.5490 | -0.8400 | -1.4901 | 0.75 | 0.6501 | -400.8082 | -334.1553 | 0.0145 | -0.1988 |
+| 0.5264 | 0.84 | 800 | 0.5475 | -0.8613 | -1.5228 | 0.7461 | 0.6615 | -404.0751 | -336.2833 | 0.0604 | -0.1549 |
+| 0.5639 | 0.94 | 900 | 0.5475 | -0.8628 | -1.5267 | 0.7422 | 0.6639 | -404.4688 | -336.4348 | 0.0704 | -0.1466 |
 
 
 ### Framework versions
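Two details in this README diff are worth unpacking. The new run adds gradient_accumulation_steps: 2 and raises total_train_batch_size from 32 to 64, which is self-consistent: with 8 devices and 2 accumulation steps, a total of 64 implies a per-device batch of 4 (an inference, since the per-device value sits outside the hunk). And because the card keeps library_name: peft with base_model: mistralai/Mistral-7B-v0.1, the saved adapter should load roughly as in the sketch below, where "<adapter-repo-id>" is a placeholder for wherever this adapter is hosted:

```python
# Minimal sketch: load the saved QLoRA adapter on top of the declared base model.
# "<adapter-repo-id>" is a placeholder; substitute the actual Hub repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",  # base_model from the card metadata
    torch_dtype=torch.bfloat16,
)
model = PeftModel.from_pretrained(base, "<adapter-repo-id>")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
```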
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c290883f57a1358de131f41b4a0750e8c17b0c4b6d17d1da003c532369f500b4
+oid sha256:df088d529a99139afd8aba68b8c1e31a72348a5aab3f67350d249cc19d8abd38
 size 83945744
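Only the Git LFS pointer is versioned here: the three-line stub records the spec version, the blob's sha256 oid, and its byte size, while the ~84 MB adapter itself lives in LFS storage. A minimal sketch of checking a downloaded blob against the updated pointer:

```python
import hashlib
import os

# Verify a downloaded LFS object against the pointer's oid and size.
def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the updated pointer above.
verify_lfs_object(
    "adapter_model.safetensors",
    "df088d529a99139afd8aba68b8c1e31a72348a5aab3f67350d249cc19d8abd38",
    83945744,
)
```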
all_results.json CHANGED
@@ -1,21 +1,21 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": 0.5000983476638794,
-    "eval_logits/rejected": 0.7595670819282532,
-    "eval_logps/chosen": -373.40216064453125,
-    "eval_logps/rejected": -457.4398498535156,
-    "eval_loss": 0.5325239300727844,
-    "eval_rewards/accuracies": 0.765625,
-    "eval_rewards/chosen": -1.2324851751327515,
-    "eval_rewards/margins": 0.8239741921424866,
-    "eval_rewards/rejected": -2.056459426879883,
-    "eval_runtime": 99.4029,
+    "eval_logits/chosen": -0.14711737632751465,
+    "eval_logits/rejected": 0.07057683169841766,
+    "eval_logps/chosen": -336.2481384277344,
+    "eval_logps/rejected": -404.3017578125,
+    "eval_loss": 0.5472621321678162,
+    "eval_rewards/accuracies": 0.7421875,
+    "eval_rewards/chosen": -0.8609448075294495,
+    "eval_rewards/margins": 0.6641340851783752,
+    "eval_rewards/rejected": -1.5250788927078247,
+    "eval_runtime": 99.6217,
     "eval_samples": 2000,
-    "eval_samples_per_second": 20.12,
-    "eval_steps_per_second": 0.322,
-    "train_loss": 0.5648497628454511,
-    "train_runtime": 7610.489,
+    "eval_samples_per_second": 20.076,
+    "eval_steps_per_second": 0.321,
+    "train_loss": 0.583915277301329,
+    "train_runtime": 6210.8046,
     "train_samples": 61135,
-    "train_samples_per_second": 8.033,
-    "train_steps_per_second": 0.251
+    "train_samples_per_second": 9.843,
+    "train_steps_per_second": 0.154
 }
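The reward fields follow DPO's usual bookkeeping: rewards/margins is rewards/chosen minus rewards/rejected, and the new numbers satisfy it exactly (-0.8609448 - (-1.5250789) = 0.6641341). A quick check, assuming the file is available locally:

```python
import json

# Sanity-check the DPO reward bookkeeping in the saved metrics.
with open("all_results.json") as f:
    metrics = json.load(f)

margin = metrics["eval_rewards/chosen"] - metrics["eval_rewards/rejected"]
assert abs(margin - metrics["eval_rewards/margins"]) < 1e-6
```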
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": 0.5000983476638794,
-    "eval_logits/rejected": 0.7595670819282532,
-    "eval_logps/chosen": -373.40216064453125,
-    "eval_logps/rejected": -457.4398498535156,
-    "eval_loss": 0.5325239300727844,
-    "eval_rewards/accuracies": 0.765625,
-    "eval_rewards/chosen": -1.2324851751327515,
-    "eval_rewards/margins": 0.8239741921424866,
-    "eval_rewards/rejected": -2.056459426879883,
-    "eval_runtime": 99.4029,
+    "eval_logits/chosen": -0.14711737632751465,
+    "eval_logits/rejected": 0.07057683169841766,
+    "eval_logps/chosen": -336.2481384277344,
+    "eval_logps/rejected": -404.3017578125,
+    "eval_loss": 0.5472621321678162,
+    "eval_rewards/accuracies": 0.7421875,
+    "eval_rewards/chosen": -0.8609448075294495,
+    "eval_rewards/margins": 0.6641340851783752,
+    "eval_rewards/rejected": -1.5250788927078247,
+    "eval_runtime": 99.6217,
     "eval_samples": 2000,
-    "eval_samples_per_second": 20.12,
-    "eval_steps_per_second": 0.322
+    "eval_samples_per_second": 20.076,
+    "eval_steps_per_second": 0.321
 }
runs/Jan09_21-38-54_ip-26-0-163-134/events.out.tfevents.1704836456.ip-26-0-163-134.3006272.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:187d4c0f4475ca9ecf34882afc9cfea5e3d52453552ad82a6912f5d632e56094
-size 54596
+oid sha256:f9aded02ad5abdc9d150dca93aa11939c432d7679fc6f7d00ddbd37b60caf962
+size 72280
runs/Jan09_21-38-54_ip-26-0-163-134/events.out.tfevents.1704842766.ip-26-0-163-134.3006272.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec72fa83fca2cfd2971a75f79691c91843a18eef743837ac959b4d655fbcba78
+size 828
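The added 828-byte event file was written once the run completed: the epoch timestamps embedded in the two filenames differ by 6310 s, matching train_runtime plus eval_runtime below (6210.8 s + 99.6 s). To inspect what either file logs, TensorBoard's event reader can be pointed at a local copy of the run directory; a sketch:

```python
# Sketch: list the scalar tags recorded in the run directory
# (requires the `tensorboard` package; tag names are assumptions).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan09_21-38-54_ip-26-0-163-134")
acc.Reload()
print(acc.Tags()["scalars"])  # e.g. train/loss, eval/rewards/margins, ...
```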
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.5648497628454511,
-    "train_runtime": 7610.489,
+    "train_loss": 0.583915277301329,
+    "train_runtime": 6210.8046,
     "train_samples": 61135,
-    "train_samples_per_second": 8.033,
-    "train_steps_per_second": 0.251
+    "train_samples_per_second": 9.843,
+    "train_steps_per_second": 0.154
 }
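The derived throughput fields are mutually consistent: 61135 samples over 6210.8 s gives about 9.843 samples/s, and at the README's total_train_batch_size of 64 one epoch is about 955 optimizer steps, i.e. about 0.154 steps/s. A quick cross-check:

```python
# Cross-check throughput fields against samples, runtime, and batch size.
train_samples, train_runtime = 61135, 6210.8046
total_train_batch_size = 64  # from the updated README

print(round(train_samples / train_runtime, 3))   # 9.843  (train_samples_per_second)
steps = train_samples / total_train_batch_size   # ~955 optimizer steps per epoch
print(round(steps / train_runtime, 3))           # 0.154  (train_steps_per_second)
```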
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff