jikaixuan committed
Commit bdfa771 (1 parent: 881e79b)

End of training

Files changed (3):
  1. README.md +14 -10
  2. all_results.json +15 -0
  3. eval_results.json +14 -14
README.md CHANGED
@@ -1,10 +1,14 @@
 ---
+license: apache-2.0
 library_name: peft
 tags:
+- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: mistralai/Mistral-7B-v0.1
+datasets:
+- HuggingFaceH4/ultrafeedback_binarized
 model-index:
 - name: zephyr-7b
   results: []
@@ -15,19 +19,19 @@ should probably proofread and complete it, then remove this comment. -->

 # zephyr-7b

-This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
+This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-qlora](https://huggingface.co/alignment-handbook/zephyr-7b-sft-qlora) on the HuggingFaceH4/ultrafeedback_binarized dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6907
+- Loss: 0.6906
 - Rewards/chosen: -0.3413
-- Rewards/rejected: -0.5651
+- Rewards/rejected: -0.5652
 - Rewards/accuracies: 0.3631
-- Rewards/margins: 0.2238
-- Logps/rejected: -131.9111
-- Logps/chosen: -103.0301
-- Logits/rejected: -0.1367
-- Logits/chosen: -0.2437
-- Use Label: 14866.4766
-- Pred Label: 3821.5239
+- Rewards/margins: 0.2239
+- Logps/rejected: -131.9189
+- Logps/chosen: -103.0295
+- Logits/rejected: -0.1381
+- Logits/chosen: -0.2453
+- Use Label: 15879.8574
+- Pred Label: 4192.1431

 ## Model description

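Since the updated card declares library_name: peft with base_model: mistralai/Mistral-7B-v0.1, this repository ships a LoRA/QLoRA adapter rather than full model weights. Below is a minimal sketch of how such an adapter could be loaded with transformers + peft; it is not part of the commit, and the adapter repo id used here is an assumption inferred from the committer and model name.

```python
# Minimal sketch (not from this commit): attach the DPO-trained QLoRA adapter
# to its base model. The adapter repo id "jikaixuan/zephyr-7b" is assumed,
# not stated in the diff; base_model comes from the model card.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-7B-v0.1"   # base_model from the card
adapter_id = "jikaixuan/zephyr-7b"      # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # loads the LoRA weights on top

prompt = "Explain direct preference optimization in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```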
all_results.json CHANGED
@@ -1,5 +1,20 @@
 {
     "epoch": 1.0,
+    "eval_logits/chosen": -0.24525223672389984,
+    "eval_logits/rejected": -0.1380803883075714,
+    "eval_logps/chosen": -103.02954864501953,
+    "eval_logps/rejected": -131.91891479492188,
+    "eval_loss": 0.6906174421310425,
+    "eval_pred_label": 4192.14306640625,
+    "eval_rewards/accuracies": 0.363095223903656,
+    "eval_rewards/chosen": -0.3412899374961853,
+    "eval_rewards/margins": 0.22391849756240845,
+    "eval_rewards/rejected": -0.5652084350585938,
+    "eval_runtime": 247.5585,
+    "eval_samples": 2000,
+    "eval_samples_per_second": 8.079,
+    "eval_steps_per_second": 0.254,
+    "eval_use_label": 15879.857421875,
     "train_loss": 0.6880922077838039,
     "train_runtime": 20023.3666,
     "train_samples": 61135,
eval_results.json CHANGED
@@ -1,18 +1,18 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": 1.3711830377578735,
-    "eval_logits/rejected": 1.4916048049926758,
-    "eval_logps/chosen": -123.7209243774414,
-    "eval_logps/rejected": -161.63131713867188,
-    "eval_loss": 0.6788680553436279,
-    "eval_pred_label": 2490.952392578125,
-    "eval_rewards/accuracies": 0.3591269850730896,
-    "eval_rewards/chosen": -0.548203706741333,
-    "eval_rewards/margins": 0.3141288757324219,
-    "eval_rewards/rejected": -0.8623325824737549,
-    "eval_runtime": 247.4536,
+    "eval_logits/chosen": -0.24525223672389984,
+    "eval_logits/rejected": -0.1380803883075714,
+    "eval_logps/chosen": -103.02954864501953,
+    "eval_logps/rejected": -131.91891479492188,
+    "eval_loss": 0.6906174421310425,
+    "eval_pred_label": 4192.14306640625,
+    "eval_rewards/accuracies": 0.363095223903656,
+    "eval_rewards/chosen": -0.3412899374961853,
+    "eval_rewards/margins": 0.22391849756240845,
+    "eval_rewards/rejected": -0.5652084350585938,
+    "eval_runtime": 247.5585,
     "eval_samples": 2000,
-    "eval_samples_per_second": 8.082,
-    "eval_steps_per_second": 0.255,
-    "eval_use_label": 17581.046875
+    "eval_samples_per_second": 8.079,
+    "eval_steps_per_second": 0.254,
+    "eval_use_label": 15879.857421875
 }
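A quick way to sanity-check the updated numbers after cloning the repo is sketched below; it is not part of the commit. The DPO reward margin in eval_results.json should equal rewards/chosen minus rewards/rejected (-0.3413 - (-0.5652) ≈ 0.2239), and all_results.json implies the training throughput. The local file paths simply name the two files changed here.

```python
# Minimal sketch (assumes eval_results.json and all_results.json from this
# commit are in the current directory).
import json

with open("eval_results.json") as f:
    ev = json.load(f)

# DPO reward margin should be chosen reward minus rejected reward.
recomputed = ev["eval_rewards/chosen"] - ev["eval_rewards/rejected"]
print(f"reported margin:   {ev['eval_rewards/margins']:.4f}")  # 0.2239
print(f"recomputed margin: {recomputed:.4f}")                   # 0.2239

with open("all_results.json") as f:
    allr = json.load(f)

# Implied training throughput: ~61135 samples / ~20023 s ≈ 3.05 samples/s.
print(f"train samples/s: {allr['train_samples'] / allr['train_runtime']:.2f}")
```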