Holmeister committed on
Commit 22f852e
1 Parent(s): 100217e

End of training

Files changed (5)
  1. README.md +2 -1
  2. all_results.json +12 -0
  3. eval_results.json +7 -0
  4. train_results.json +8 -0
  5. trainer_state.json +66 -0
README.md CHANGED
@@ -2,6 +2,7 @@
 library_name: peft
 tags:
 - llama-factory
+- lora
 - generated_from_trainer
 base_model: meta-llama/Meta-Llama-3-8B
 model-index:
@@ -14,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # LLama3_deneme
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the emollms_ei_oc_mixed dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.1256
 
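The card tags this as a PEFT/LoRA adapter trained with LLaMA-Factory on top of meta-llama/Meta-Llama-3-8B. A minimal loading sketch follows; the adapter repo id `Holmeister/LLama3_deneme` is an assumption inferred from the committer and model name, not something stated in the diff.

```python
# Minimal sketch: load the base model and attach this LoRA adapter with PEFT.
# The adapter repo id below is an assumption (committer + model name); replace as needed.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "meta-llama/Meta-Llama-3-8B"
ADAPTER = "Holmeister/LLama3_deneme"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base_model, ADAPTER)  # wraps the base weights with the LoRA deltas
model.eval()
```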
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+     "epoch": 0.4979253112033195,
+     "eval_loss": 0.12561871111392975,
+     "eval_runtime": 54.7022,
+     "eval_samples_per_second": 15.667,
+     "eval_steps_per_second": 1.974,
+     "total_flos": 2.42755307569152e+16,
+     "train_loss": 0.2621128797531128,
+     "train_runtime": 650.0908,
+     "train_samples_per_second": 5.929,
+     "train_steps_per_second": 0.023
+ }
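For orientation, the eval_loss above is a mean cross-entropy; assuming it is a per-token language-modeling loss, the corresponding perplexity is simply exp(loss), as in the quick check below.

```python
import math

eval_loss = 0.12561871111392975  # from all_results.json
# Assuming a per-token cross-entropy loss, perplexity = exp(loss).
print(f"perplexity ~ {math.exp(eval_loss):.3f}")  # ~ 1.134
```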
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "epoch": 0.4979253112033195,
+     "eval_loss": 0.12561871111392975,
+     "eval_runtime": 54.7022,
+     "eval_samples_per_second": 15.667,
+     "eval_steps_per_second": 1.974
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 0.4979253112033195,
+     "total_flos": 2.42755307569152e+16,
+     "train_loss": 0.2621128797531128,
+     "train_runtime": 650.0908,
+     "train_samples_per_second": 5.929,
+     "train_steps_per_second": 0.023
+ }
trainer_state.json ADDED
@@ -0,0 +1,66 @@
+ {
+     "best_metric": 0.12561871111392975,
+     "best_model_checkpoint": "saves/LLaMA3-8B/lora/train_1/checkpoint-10",
+     "epoch": 0.4979253112033195,
+     "eval_steps": 10,
+     "global_step": 15,
+     "is_hyper_param_search": false,
+     "is_local_process_zero": true,
+     "is_world_process_zero": true,
+     "log_history": [
+         {
+             "epoch": 0.33195020746887965,
+             "grad_norm": 0.14592523872852325,
+             "learning_rate": 7.500000000000002e-05,
+             "loss": 0.3294,
+             "step": 10
+         },
+         {
+             "epoch": 0.33195020746887965,
+             "eval_loss": 0.12561871111392975,
+             "eval_runtime": 54.6753,
+             "eval_samples_per_second": 15.674,
+             "eval_steps_per_second": 1.975,
+             "step": 10
+         },
+         {
+             "epoch": 0.4979253112033195,
+             "step": 15,
+             "total_flos": 2.42755307569152e+16,
+             "train_loss": 0.2621128797531128,
+             "train_runtime": 650.0908,
+             "train_samples_per_second": 5.929,
+             "train_steps_per_second": 0.023
+         }
+     ],
+     "logging_steps": 10,
+     "max_steps": 15,
+     "num_input_tokens_seen": 0,
+     "num_train_epochs": 1,
+     "save_steps": 10,
+     "stateful_callbacks": {
+         "EarlyStoppingCallback": {
+             "args": {
+                 "early_stopping_patience": 3,
+                 "early_stopping_threshold": 0.0
+             },
+             "attributes": {
+                 "early_stopping_patience_counter": 0
+             }
+         },
+         "TrainerControl": {
+             "args": {
+                 "should_epoch_stop": false,
+                 "should_evaluate": false,
+                 "should_log": false,
+                 "should_save": true,
+                 "should_training_stop": false
+             },
+             "attributes": {}
+         }
+     },
+     "total_flos": 2.42755307569152e+16,
+     "train_batch_size": 32,
+     "trial_name": null,
+     "trial_params": null
+ }
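trainer_state.json records evaluation, logging, and checkpointing every 10 steps, a train batch size of 32, and an EarlyStoppingCallback with patience 3 and threshold 0.0. Below is a minimal sketch of how such a configuration is typically expressed with the transformers Trainer API; the model/dataset wiring is omitted and the exact argument names may vary by transformers version.

```python
# Sketch of the evaluation/early-stopping setup implied by trainer_state.json.
# Only arguments visible in the state file are set; everything else stays at its default.
from transformers import TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="saves/LLaMA3-8B/lora/train_1",
    per_device_train_batch_size=32,
    max_steps=15,
    logging_steps=10,
    eval_steps=10,
    save_steps=10,
    eval_strategy="steps",           # "evaluation_strategy" on older transformers releases
    save_strategy="steps",
    load_best_model_at_end=True,     # required for EarlyStoppingCallback
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

# Passed to Trainer(..., callbacks=[early_stopping]) together with the model and datasets.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=3,
    early_stopping_threshold=0.0,
)
```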