ale-bay committed
Commit 7b3595e
1 Parent(s): 75ee381

End of training

README.md CHANGED
@@ -1,10 +1,15 @@
 ---
+base_model: data/gemma-2b
 tags:
+- alignment-handbook
+- trl
+- sft
+- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
-- generator
+- argilla/dpo-mix-7k
 model-index:
 - name: zephyr-2b-gemma-dft
   results: []
@@ -16,7 +21,9 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://zebra.wandb.io/cto/distillm/runs/vxz2pnbq)
 # zephyr-2b-gemma-dft
 
-This model was trained from scratch on the generator dataset.
+This model is a fine-tuned version of [data/gemma-2b](https://huggingface.co/data/gemma-2b) on the argilla/dpo-mix-7k dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0001
 
 ## Model description
 
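As a usage note, here is a minimal sketch of loading the resulting checkpoint for generation with the `transformers` library. The repo id is an assumption (the card's `data/gemma-2b` is a local training path, not a Hub id); substitute the actual location of `zephyr-2b-gemma-dft`.

```python
# Minimal sketch, not part of this commit: load the fine-tuned model and generate.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ale-bay/zephyr-2b-gemma-dft"  # assumed repo id; replace with the real one

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Summarize what SFT training does.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```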
all_results.json CHANGED
@@ -1,5 +1,10 @@
 {
     "epoch": 0,
+    "eval_loss": 5.277253148960881e-05,
+    "eval_runtime": 76.218,
+    "eval_samples": 750,
+    "eval_samples_per_second": 12.74,
+    "eval_steps_per_second": 1.601,
     "total_flos": 0,
     "train_loss": 0.0,
     "train_runtime": 5.1094,
config.json CHANGED
@@ -24,6 +24,6 @@
   "rope_theta": 10000.0,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.42.4",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 256000
 }
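This change re-enables the key/value cache in the saved config. A common pattern is to disable `use_cache` during training (it conflicts with gradient checkpointing) and re-enable it for inference; the same toggle can also be applied at load time, as in this minimal sketch (repo id is illustrative, not part of this commit):

```python
# Minimal sketch: override use_cache when loading instead of editing config.json.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("ale-bay/zephyr-2b-gemma-dft")  # assumed repo id
config.use_cache = True  # enable the KV cache for faster autoregressive decoding

model = AutoModelForCausalLM.from_pretrained("ale-bay/zephyr-2b-gemma-dft", config=config)
```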
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 0,
     "eval_loss": 5.277253148960881e-05,
-    "eval_runtime": 76.5403,
+    "eval_runtime": 76.218,
     "eval_samples": 750,
-    "eval_samples_per_second": 12.686,
-    "eval_steps_per_second": 1.594
+    "eval_samples_per_second": 12.74,
+    "eval_steps_per_second": 1.601
 }
runs/Jul23_16-11-48_ale-distillm-8-0-0/events.out.tfevents.1721747639.ale-distillm-8-0-0.69762.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bebad7a947c42b08bd748f2d0e8f90adef828c9a03e02faecdf0737fef4f3af1
+size 344