lewtun committed
Commit 1cd3214
1 Parent(s): 34e393c

End of training

Files changed (2):
  1. README.md +6 -2
  2. config.json +1 -1
README.md CHANGED
@@ -2,11 +2,15 @@
 license: other
 base_model: google/gemma-7b
 tags:
+- alignment-handbook
+- trl
+- sft
+- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
-- generator
+- HuggingFaceH4/deita-10k-v0-sft
 model-index:
 - name: zephyr-7b-gemma-sft
   results: []
@@ -17,7 +21,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-gemma-sft
 
-This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the generator dataset.
+This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the HuggingFaceH4/deita-10k-v0-sft dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.9732
 
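The card update replaces the `generator` dataset name (likely the placeholder that appears when the training set is built via `Dataset.from_generator`, as trl's packing dataloader does) with the actual SFT dataset, HuggingFaceH4/deita-10k-v0-sft. A minimal inference sketch against the finished checkpoint, assuming a hypothetical repo id (the commit shows only the model name `zephyr-7b-gemma-sft`, not the namespace it was pushed to) and that the tokenizer ships a chat template:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id -- the commit does not show the namespace.
repo_id = "HuggingFaceH4/zephyr-7b-gemma-sft"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="bfloat16")

# Assumes the tokenizer has a chat template (standard for chat-SFT'd Gemma).
messages = [{"role": "user", "content": "What is SFT in one sentence?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
outputs = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```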
config.json CHANGED
@@ -23,6 +23,6 @@
   "rope_theta": 10000.0,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.39.0.dev0",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 256000
 }
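The only config.json change flips `use_cache` back to `true`. Trainers commonly disable the KV cache during fine-tuning because gradient checkpointing recomputes activations in the backward pass and cannot reuse cached key/values; re-enabling it at push time lets downstream `generate()` calls run at full speed. A small sketch of that toggle, using the standard transformers `AutoConfig` API and the same hypothetical repo id:

```python
from transformers import AutoConfig

repo_id = "HuggingFaceH4/zephyr-7b-gemma-sft"  # hypothetical namespace

config = AutoConfig.from_pretrained(repo_id)
print(config.use_cache)  # True after this commit: generation reuses the KV cache

# During training with gradient checkpointing, the cache is switched off so
# checkpointed segments can be recomputed cleanly in the backward pass:
config.use_cache = False
config.save_pretrained("local-training-config")
```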