Jennny committed on
Commit 6de5695 · verified · 1 Parent(s): 1d288e8

End of training

Files changed (4)
  1. README.md +6 -3
  2. all_results.json +5 -0
  3. config.json +1 -1
  4. eval_results.json +4 -4
README.md CHANGED
@@ -1,17 +1,20 @@
 ---
 base_model: meta-llama/Llama-3.1-8B
+datasets:
+- allenai/ultrafeedback_binarized_cleaned
 library_name: transformers
-model_name: llama3_8b_sft_ultrafb
+model_name: meta-llama/Llama-3.1-8B
 tags:
 - generated_from_trainer
+- alignment-handbook
 - trl
 - sft
 licence: license
 ---
 
-# Model Card for llama3_8b_sft_ultrafb
+# Model Card for meta-llama/Llama-3.1-8B
 
-This model is a fine-tuned version of [meta-llama/Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B).
+This model is a fine-tuned version of [meta-llama/Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) on the [['allenai/ultrafeedback_binarized_cleaned']](https://huggingface.co/datasets/['allenai/ultrafeedback_binarized_cleaned']) dataset.
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
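The updated model card points readers to a Quick start section. A minimal usage sketch for that section, assuming the checkpoint is published under the hypothetical Hub id `Jennny/llama3_8b_sft_ultrafb` (the commit does not name the repository) and that a chat template was applied during SFT:

```python
import torch
from transformers import pipeline

# Hypothetical repo id - replace with the actual Hub repository for this checkpoint.
generator = pipeline(
    "text-generation",
    model="Jennny/llama3_8b_sft_ultrafb",
    torch_dtype=torch.bfloat16,  # matches the torch_dtype recorded in config.json
    device_map="auto",
)

# The SFT data (ultrafeedback_binarized_cleaned) is chat-formatted, so prompt with messages.
messages = [{"role": "user", "content": "Summarize what UltraFeedback is in one sentence."}]
output = generator(messages, max_new_tokens=128, return_full_text=False)
print(output[0]["generated_text"])
```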
all_results.json CHANGED
@@ -1,5 +1,10 @@
 {
     "epoch": 1.0,
+    "eval_loss": 1.1813496351242065,
+    "eval_runtime": 2.1935,
+    "eval_samples": 985,
+    "eval_samples_per_second": 93.915,
+    "eval_steps_per_second": 3.191,
     "total_flos": 42608759930880.0,
     "train_loss": 1.261282390239192,
     "train_runtime": 737.9605,
config.json CHANGED
@@ -31,6 +31,6 @@
     "tie_word_embeddings": false,
     "torch_dtype": "bfloat16",
     "transformers_version": "4.46.3",
-    "use_cache": false,
+    "use_cache": true,
     "vocab_size": 128256
 }
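The only config.json change flips use_cache back to true. That flag controls whether generate() reuses past key/value states; training scripts commonly set it to false (the KV cache is incompatible with gradient checkpointing) and re-enable it when saving the final model for inference. A small check, again using the hypothetical repo id from above:

```python
from transformers import AutoConfig

# Hypothetical repo id; the commit page does not name the Hub repository.
config = AutoConfig.from_pretrained("Jennny/llama3_8b_sft_ultrafb")
print(config.use_cache)  # True after this commit, so generation reuses the KV cache
# During training the same flag is typically disabled, e.g. model.config.use_cache = False
```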
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "eval_loss": 1.175293207168579,
-    "eval_runtime": 2.507,
+    "eval_loss": 1.1813496351242065,
+    "eval_runtime": 2.1935,
     "eval_samples": 985,
-    "eval_samples_per_second": 41.085,
-    "eval_steps_per_second": 1.596
+    "eval_samples_per_second": 93.915,
+    "eval_steps_per_second": 3.191
 }