lillian039 committed
Commit f4c4f24
1 Parent(s): 4f1153c

End of training

README.md CHANGED
@@ -3,9 +3,19 @@ library_name: transformers
 license: llama3.1
 base_model: meta-llama/Meta-Llama-3.1-8B-Instruct
 tags:
+- alignment-handbook
 - trl
 - sft
 - generated_from_trainer
+- trl
+- sft
+- generated_from_trainer
+datasets:
+- barc0/transduction_heavy_100k_jsonl
+- barc0/transduction_heavy_suggestfunction_100k_jsonl
+- barc0/transduction_rearc_dataset_400k
+- barc0/transduction_angmented_100k-gpt4-description-gpt4omini-code_generated_problems
+- barc0/transduction_angmented_100k_gpt4o-mini_generated_problems
 model-index:
 - name: engineer1-heavy-barc-llama3.1-8b-ins-fft-transduction_lr1e-5_epoch3
   results: []
@@ -16,7 +26,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # engineer1-heavy-barc-llama3.1-8b-ins-fft-transduction_lr1e-5_epoch3
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) on the None dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) on the barc0/transduction_heavy_100k_jsonl, the barc0/transduction_heavy_suggestfunction_100k_jsonl, the barc0/transduction_rearc_dataset_400k, the barc0/transduction_angmented_100k-gpt4-description-gpt4omini-code_generated_problems and the barc0/transduction_angmented_100k_gpt4o-mini_generated_problems datasets.
 It achieves the following results on the evaluation set:
 - Loss: 0.0219
 
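The card above records a full fine-tune of Llama-3.1-8B-Instruct with an eval loss of 0.0219. A minimal usage sketch follows; the Hub repo id is an assumption based on the committing account (the card only gives the model name), so substitute the real path.

```python
# Minimal sketch: load the fine-tuned checkpoint for inference with
# transformers. The repo id below is an assumption based on the committing
# account (lillian039); substitute the actual Hub path if it differs.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "lillian039/engineer1-heavy-barc-llama3.1-8b-ins-fft-transduction_lr1e-5_epoch3"  # assumed

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

# The base model is instruction-tuned, so use the chat template.
messages = [{"role": "user", "content": "Describe the transduction task."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```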
all_results.json CHANGED
@@ -1,5 +1,10 @@
 {
     "epoch": 3.0,
+    "eval_loss": 0.02185099571943283,
+    "eval_runtime": 292.4151,
+    "eval_samples": 25121,
+    "eval_samples_per_second": 85.909,
+    "eval_steps_per_second": 1.344,
     "total_flos": 4.3304182632184545e+18,
     "train_loss": 0.030125039980372262,
     "train_runtime": 67465.0545,
config.json CHANGED
@@ -35,6 +35,6 @@
     "tie_word_embeddings": false,
     "torch_dtype": "bfloat16",
     "transformers_version": "4.45.0.dev0",
-    "use_cache": false,
+    "use_cache": true,
     "vocab_size": 128256
 }
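The only config.json change re-enables the KV cache: use_cache is commonly set to false during training (it conflicts with gradient checkpointing) and flipped back to true at export so generate() can reuse past key/value states. A small sketch to confirm the flag on the saved checkpoint, using the same assumed repo id as above:

```python
# Sketch: confirm the flag on the saved checkpoint. use_cache=True lets
# generate() reuse key/value states across decoding steps. Repo id assumed,
# as above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "lillian039/engineer1-heavy-barc-llama3.1-8b-ins-fft-transduction_lr1e-5_epoch3"  # assumed
)
print(config.use_cache)    # True after this commit
print(config.torch_dtype)  # torch.bfloat16
```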
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 3.0,
+    "eval_loss": 0.02185099571943283,
+    "eval_runtime": 292.4151,
+    "eval_samples": 25121,
+    "eval_samples_per_second": 85.909,
+    "eval_steps_per_second": 1.344
+}
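eval_results.json is a plain JSON metrics file; a hedged sketch of fetching and reading it (repo id again assumed):

```python
# Sketch: download and read the metrics file added by this commit.
# hf_hub_download is the standard huggingface_hub helper; the repo id is
# an assumption, as above.
import json

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="lillian039/engineer1-heavy-barc-llama3.1-8b-ins-fft-transduction_lr1e-5_epoch3",  # assumed
    filename="eval_results.json",
)
with open(path) as f:
    metrics = json.load(f)

print(metrics["eval_loss"])  # 0.02185099571943283
```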
runs/Oct26_18-07-58_7bcc0bab7cf9/events.out.tfevents.1730035682.7bcc0bab7cf9.660.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5d7c7188fbf1a847c859f39e709de9736dc1cd888fe237d2301993c46125f64
+size 359
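What is committed for the run log is not the tfevents payload itself but a three-line Git LFS pointer (spec v1): a version URL, the object's sha256, and its size in bytes. Parsing it is straightforward; a sketch using only the fields shown above:

```python
# Sketch: parse a Git LFS pointer file (spec v1) like the one committed for
# the tfevents run log. The pointer holds only the object's sha256 and size;
# the 359-byte payload itself lives in LFS storage.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:b5d7c7188fbf1a847c859f39e709de9736dc1cd888fe237d2301993c46125f64
size 359"""

info = parse_lfs_pointer(pointer)
print(info["oid"])   # sha256:b5d7c718...
print(info["size"])  # 359
```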