ale-bay committed
Commit 50ec51e
1 Parent(s): cd9aa6a

End of training
README.md CHANGED
@@ -2,9 +2,15 @@
 license: gemma
 base_model: google/gemma-7b
 tags:
+- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
+- trl
+- dpo
+- generated_from_trainer
+datasets:
+- argilla/dpo-mix-7k
 model-index:
 - name: zephyr-7b-gemma-dpo
   results: []
@@ -16,17 +22,17 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://zebra.wandb.io/cto/distillm/runs/n5v6nn5w)
 # zephyr-7b-gemma-dpo
 
-This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on an unknown dataset.
+This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the argilla/dpo-mix-7k dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.8002
-- Rewards/chosen: -0.4660
-- Rewards/rejected: -1.3128
-- Rewards/accuracies: 0.7604
-- Rewards/margins: 0.8468
-- Logps/rejected: -1648.5675
-- Logps/chosen: -1530.4515
-- Logits/rejected: -25.1625
-- Logits/chosen: -18.0007
+- Loss: 0.8036
+- Rewards/chosen: -0.4463
+- Rewards/rejected: -1.2861
+- Rewards/accuracies: 0.7292
+- Rewards/margins: 0.8397
+- Logps/rejected: -1648.0323
+- Logps/chosen: -1530.0571
+- Logits/rejected: -25.1620
+- Logits/chosen: -18.0449
 
 ## Model description
 
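For context, the Rewards/* values in the README diff above are the standard diagnostics logged by TRL's DPO trainer. A minimal sketch of how such implicit rewards, margins, and accuracy are typically derived from policy and reference log-probabilities (the function name and the β value are illustrative, not taken from this run):

```python
import torch
import torch.nn.functional as F

def dpo_eval_metrics(policy_chosen_logps: torch.Tensor,
                     policy_rejected_logps: torch.Tensor,
                     ref_chosen_logps: torch.Tensor,
                     ref_rejected_logps: torch.Tensor,
                     beta: float = 0.05):
    # Implicit DPO reward: beta * (log pi_theta(y|x) - log pi_ref(y|x))
    rewards_chosen = beta * (policy_chosen_logps - ref_chosen_logps)
    rewards_rejected = beta * (policy_rejected_logps - ref_rejected_logps)
    margins = rewards_chosen - rewards_rejected           # Rewards/margins
    loss = -F.logsigmoid(margins).mean()                  # sigmoid DPO loss
    accuracy = (margins > 0).float().mean()               # Rewards/accuracies
    return {
        "loss": loss.item(),
        "rewards/chosen": rewards_chosen.mean().item(),
        "rewards/rejected": rewards_rejected.mean().item(),
        "rewards/margins": margins.mean().item(),
        "rewards/accuracies": accuracy.item(),
    }
```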
all_results.json CHANGED
@@ -1,5 +1,18 @@
 {
     "epoch": 1.971563981042654,
+    "eval_logits/chosen": -18.044893264770508,
+    "eval_logits/rejected": -25.161956787109375,
+    "eval_logps/chosen": -1530.05712890625,
+    "eval_logps/rejected": -1648.0323486328125,
+    "eval_loss": 0.8036046624183655,
+    "eval_rewards/accuracies": 0.7291666865348816,
+    "eval_rewards/chosen": -0.44631412625312805,
+    "eval_rewards/margins": 0.8397437930107117,
+    "eval_rewards/rejected": -1.2860580682754517,
+    "eval_runtime": 37.4541,
+    "eval_samples": 750,
+    "eval_samples_per_second": 20.025,
+    "eval_steps_per_second": 0.641,
     "total_flos": 0.0,
     "train_loss": 0.6178501087885636,
     "train_runtime": 1142.0913,
config.json CHANGED
@@ -24,6 +24,6 @@
   "rope_theta": 10000.0,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.42.3",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 256000
 }
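The use_cache flip above is routine: the flag is commonly set to false during training (the KV cache is incompatible with gradient checkpointing) and restored to true for inference, where cached key/value states avoid recomputing past attention at each generation step. A minimal loading sketch, with the repo id left as a placeholder rather than assumed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "<this-model-repo>"  # placeholder: substitute this repo's actual id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# use_cache=True (as now set in config.json) enables the KV cache for
# faster autoregressive generation.
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, use_cache=True
)
```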
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
     "epoch": 1.971563981042654,
-    "eval_logits/chosen": 85.37242889404297,
-    "eval_logits/rejected": 79.6775131225586,
-    "eval_logps/chosen": -464.193603515625,
-    "eval_logps/rejected": -498.6763916015625,
-    "eval_loss": 0.4798102080821991,
-    "eval_rewards/accuracies": 0.7083333134651184,
-    "eval_rewards/chosen": -5.0729851722717285,
-    "eval_rewards/margins": 1.7632160186767578,
-    "eval_rewards/rejected": -6.836201190948486,
-    "eval_runtime": 36.7059,
+    "eval_logits/chosen": -18.044893264770508,
+    "eval_logits/rejected": -25.161956787109375,
+    "eval_logps/chosen": -1530.05712890625,
+    "eval_logps/rejected": -1648.0323486328125,
+    "eval_loss": 0.8036046624183655,
+    "eval_rewards/accuracies": 0.7291666865348816,
+    "eval_rewards/chosen": -0.44631412625312805,
+    "eval_rewards/margins": 0.8397437930107117,
+    "eval_rewards/rejected": -1.2860580682754517,
+    "eval_runtime": 37.4541,
     "eval_samples": 750,
-    "eval_samples_per_second": 20.433,
-    "eval_steps_per_second": 0.654
+    "eval_samples_per_second": 20.025,
+    "eval_steps_per_second": 0.641
 }
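The updated throughput fields are internally consistent, which is a quick sanity check on any eval run (illustrative arithmetic only; the batch-size reading is an inference, not stated in the files):

```python
# 750 samples over 37.4541 s of eval runtime:
print(750 / 37.4541)    # ~= 20.025, matching eval_samples_per_second
# steps/s * runtime ~= 24 eval steps, suggesting ~32 samples per eval batch:
print(0.641 * 37.4541)  # ~= 24.0
```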
runs/Jul11_11-42-48_ale-distillm-8-0-0/events.out.tfevents.1720696575.ale-distillm-8-0-0.6459.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0df482984ff3b81a62a5d3c8c5d14de6b66fbe2c0d4eaecead3c1a40370c24a9
+size 815