jikaixuan committed on
Commit a7d8205
1 Parent(s): 59b129b

End of training

Files changed (3):
  1. README.md +14 -11
  2. all_results.json +15 -0
  3. eval_results.json +14 -14

README.md CHANGED

@@ -2,10 +2,13 @@
  license: apache-2.0
  library_name: peft
  tags:
+ - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: mistralai/Mistral-7B-v0.1
+ datasets:
+ - HuggingFaceH4/ultrafeedback_binarized
  model-index:
  - name: zephyr-7b
    results: []
@@ -16,19 +19,19 @@ should probably proofread and complete it, then remove this comment. -->

  # zephyr-7b

- This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-qlora](https://huggingface.co/alignment-handbook/zephyr-7b-sft-qlora) on the HuggingFaceH4/ultrafeedback_binarized dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.6918
- - Rewards/chosen: -0.0863
- - Rewards/rejected: -0.1983
- - Rewards/accuracies: 0.3571
- - Rewards/margins: 0.1120
- - Logps/rejected: -95.2291
- - Logps/chosen: -77.5275
- - Logits/rejected: -1.9113
- - Logits/chosen: -1.9391
- - Use Label: 14335.7139
- - Pred Label: 4352.2856
+ - Rewards/chosen: -0.0862
+ - Rewards/rejected: -0.1980
+ - Rewards/accuracies: 0.3591
+ - Rewards/margins: 0.1117
+ - Logps/rejected: -95.1937
+ - Logps/chosen: -77.5232
+ - Logits/rejected: -1.9123
+ - Logits/chosen: -1.9402
+ - Use Label: 15333.4131
+ - Pred Label: 4738.5874

  ## Model description

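For context on the card above: it tags the repo as a peft (QLoRA) adapter trained with DPO on top of mistralai/Mistral-7B-v0.1. A minimal sketch of loading such an adapter for inference is shown below; it is not part of this commit, and the repo id `jikaixuan/zephyr-7b` is only an assumption based on the committer and model name in the card.

```python
# Sketch only: load the DPO QLoRA adapter described by the model card.
# "jikaixuan/zephyr-7b" is a hypothetical repo id inferred from this commit.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "jikaixuan/zephyr-7b"  # hypothetical; adjust to the real repo

# AutoPeftModelForCausalLM reads the adapter config, downloads the base model
# (mistralai/Mistral-7B-v0.1 per the card) and attaches the LoRA weights.
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# If the adapter repo does not ship tokenizer files, load the tokenizer
# from the base model instead.
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

inputs = tokenizer("What does DPO optimize?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```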

all_results.json CHANGED

@@ -1,5 +1,20 @@
  {
  "epoch": 1.0,
+ "eval_logits/chosen": -1.9401931762695312,
+ "eval_logits/rejected": -1.9123154878616333,
+ "eval_logps/chosen": -77.5232162475586,
+ "eval_logps/rejected": -95.19373321533203,
+ "eval_loss": 0.6917868852615356,
+ "eval_pred_label": 4738.58740234375,
+ "eval_rewards/accuracies": 0.3591269850730896,
+ "eval_rewards/chosen": -0.0862266793847084,
+ "eval_rewards/margins": 0.11172995716333389,
+ "eval_rewards/rejected": -0.19795666635036469,
+ "eval_runtime": 247.3331,
+ "eval_samples": 2000,
+ "eval_samples_per_second": 8.086,
+ "eval_steps_per_second": 0.255,
+ "eval_use_label": 15333.4130859375,
  "train_loss": 0.6906769273168754,
  "train_runtime": 20027.4031,
  "train_samples": 61135,

eval_results.json CHANGED

@@ -1,18 +1,18 @@
  {
  "epoch": 1.0,
- "eval_logits/chosen": -0.24525223672389984,
- "eval_logits/rejected": -0.1380803883075714,
- "eval_logps/chosen": -103.02954864501953,
- "eval_logps/rejected": -131.91891479492188,
- "eval_loss": 0.6906174421310425,
- "eval_pred_label": 4192.14306640625,
- "eval_rewards/accuracies": 0.363095223903656,
- "eval_rewards/chosen": -0.3412899374961853,
- "eval_rewards/margins": 0.22391849756240845,
- "eval_rewards/rejected": -0.5652084350585938,
- "eval_runtime": 247.5585,
+ "eval_logits/chosen": -1.9401931762695312,
+ "eval_logits/rejected": -1.9123154878616333,
+ "eval_logps/chosen": -77.5232162475586,
+ "eval_logps/rejected": -95.19373321533203,
+ "eval_loss": 0.6917868852615356,
+ "eval_pred_label": 4738.58740234375,
+ "eval_rewards/accuracies": 0.3591269850730896,
+ "eval_rewards/chosen": -0.0862266793847084,
+ "eval_rewards/margins": 0.11172995716333389,
+ "eval_rewards/rejected": -0.19795666635036469,
+ "eval_runtime": 247.3331,
  "eval_samples": 2000,
- "eval_samples_per_second": 8.079,
- "eval_steps_per_second": 0.254,
- "eval_use_label": 15879.857421875
+ "eval_samples_per_second": 8.086,
+ "eval_steps_per_second": 0.255,
+ "eval_use_label": 15333.4130859375
  }