martimfasantos committed on
Commit 4b6f842 · verified · 1 Parent(s): 53add3e

Model save
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ license: apache-2.0
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: tinyllama-1.1b-mt-sft-full_sardine_2gpus_old
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # tinyllama-1.1b-mt-sft-full_sardine_2gpus_old
+
+ This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the generator dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.43.3
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
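
For reference, the hyperparameters listed in the card above map onto a TRL `SFTTrainer` run roughly as sketched below. This is not the author's actual training script: the data path and text field, `packing=True`, and `max_seq_length=2048` are assumptions; only the values shown in the card (learning rate, batch sizes, scheduler, warmup ratio, seed, epochs, logging/save steps) are taken from it.

```python
# Hedged sketch of an SFT run matching the hyperparameters in the card above.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig, SFTTrainer

base = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# Placeholder data; the card only names the packed "generator" dataset.
train_dataset = load_dataset("json", data_files="sft_data.jsonl", split="train")

args = SFTConfig(
    output_dir="tinyllama-1.1b-mt-sft-full_sardine_2gpus_old",
    learning_rate=1e-5,
    per_device_train_batch_size=1,   # 1 x 2 GPUs x 16 accumulation steps = 32 effective
    per_device_eval_batch_size=8,    # x 2 GPUs = 16 total eval batch size
    gradient_accumulation_steps=16,
    num_train_epochs=2,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    logging_steps=10,
    save_steps=100,
    dataset_text_field="text",       # assumption about the data format
    packing=True,                    # assumption; consistent with the packed "generator" dataset
    max_seq_length=2048,             # assumption; matches max_length in generation_config.json
)

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
# Launched on 2 GPUs, e.g. `accelerate launch --num_processes 2 train.py`.
trainer.train()
```

The default optimizer in `SFTConfig`/`TrainingArguments` is already Adam with betas=(0.9, 0.999) and epsilon=1e-08, so it does not need to be set explicitly.
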
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.9726027397260273,
+ "total_flos": 3911524024320.0,
+ "train_loss": 1.0647009246879153,
+ "train_runtime": 784.6331,
+ "train_samples": 15798,
+ "train_samples_per_second": 4.466,
+ "train_steps_per_second": 0.138
+ }
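
These summary numbers are internally consistent; a quick check (the step count comes from trainer_state.json further down, the effective batch size from the card above):

```python
# Consistency check on the reported throughput, using values from this commit's files.
global_step = 108               # trainer_state.json
train_runtime = 784.6331        # seconds, from all_results.json
effective_batch = 1 * 2 * 16    # train_batch_size x num_devices x gradient_accumulation_steps

print(round(global_step / train_runtime, 3))  # 0.138 -> matches train_steps_per_second
print(effective_batch)                        # 32    -> matches total_train_batch_size
```
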
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "max_length": 2048,
+ "pad_token_id": 0,
+ "transformers_version": "4.43.3"
+ }
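
Loading the checkpoint through `transformers` picks this generation config up automatically. A minimal usage sketch, assuming the repository id is `martimfasantos/tinyllama-1.1b-mt-sft-full_sardine_2gpus_old` and that "mt" in the name refers to machine translation (the prompt is illustrative only):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id; adjust to wherever this checkpoint is actually hosted.
repo = "martimfasantos/tinyllama-1.1b-mt-sft-full_sardine_2gpus_old"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

# bos/eos/pad token ids and max_length come from the generation_config.json above.
inputs = tokenizer("Translate to Portuguese: The sea is calm today.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
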
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aba8507b6166ac50e982d1107d2d0129231b5ff780e5f543f5d4c211a88d4716
+ oid sha256:6d585a465b9c2ba5a2f48b37a3e72f751d98c4b9603052d99f648b176303be07
  size 2200119864
runs/Aug06_22-29-25_poseidon/events.out.tfevents.1722983380.poseidon.1773058.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:515702cfe52cdae98cc572f222a361328b8783c5ade56dcf1b805e64cbfdcad6
- size 7469
+ oid sha256:5595097f02e3b122cf2be2c5632a21b41e4386faed0267eaa802407b64f3d93f
+ size 7817
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.9726027397260273,
+ "total_flos": 3911524024320.0,
+ "train_loss": 1.0647009246879153,
+ "train_runtime": 784.6331,
+ "train_samples": 15798,
+ "train_samples_per_second": 4.466,
+ "train_steps_per_second": 0.138
+ }
trainer_state.json ADDED
@@ -0,0 +1,119 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9726027397260273,
+ "eval_steps": 500,
+ "global_step": 108,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0182648401826484,
+ "grad_norm": 21.109767473838268,
+ "learning_rate": 9.090909090909091e-07,
+ "loss": 1.6365,
+ "step": 1
+ },
+ {
+ "epoch": 0.182648401826484,
+ "grad_norm": 1.9665126756085918,
+ "learning_rate": 9.090909090909091e-06,
+ "loss": 1.5289,
+ "step": 10
+ },
+ {
+ "epoch": 0.365296803652968,
+ "grad_norm": 1.3148117828569053,
+ "learning_rate": 9.789086620939936e-06,
+ "loss": 1.2835,
+ "step": 20
+ },
+ {
+ "epoch": 0.547945205479452,
+ "grad_norm": 1.1804824330362151,
+ "learning_rate": 9.082818315286054e-06,
+ "loss": 1.1618,
+ "step": 30
+ },
+ {
+ "epoch": 0.730593607305936,
+ "grad_norm": 1.116060986988856,
+ "learning_rate": 7.952011865029614e-06,
+ "loss": 1.1206,
+ "step": 40
+ },
+ {
+ "epoch": 0.91324200913242,
+ "grad_norm": 1.1216076426923212,
+ "learning_rate": 6.514250379489754e-06,
+ "loss": 1.0685,
+ "step": 50
+ },
+ {
+ "epoch": 1.095890410958904,
+ "grad_norm": 1.1626749139232289,
+ "learning_rate": 4.919034655987493e-06,
+ "loss": 0.9895,
+ "step": 60
+ },
+ {
+ "epoch": 1.278538812785388,
+ "grad_norm": 1.0776560696316984,
+ "learning_rate": 3.3322378417458985e-06,
+ "loss": 0.9347,
+ "step": 70
+ },
+ {
+ "epoch": 1.461187214611872,
+ "grad_norm": 1.0533230608492796,
+ "learning_rate": 1.9188576719953635e-06,
+ "loss": 0.9082,
+ "step": 80
+ },
+ {
+ "epoch": 1.643835616438356,
+ "grad_norm": 1.0469386406850305,
+ "learning_rate": 8.258597348536452e-07,
+ "loss": 0.89,
+ "step": 90
+ },
+ {
+ "epoch": 1.82648401826484,
+ "grad_norm": 1.0954009517444092,
+ "learning_rate": 1.6689574843694433e-07,
+ "loss": 0.8899,
+ "step": 100
+ },
+ {
+ "epoch": 1.9726027397260273,
+ "step": 108,
+ "total_flos": 3911524024320.0,
+ "train_loss": 1.0647009246879153,
+ "train_runtime": 784.6331,
+ "train_samples_per_second": 4.466,
+ "train_steps_per_second": 0.138
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 108,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3911524024320.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
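
The `log_history` entries above are straightforward to inspect offline; a small sketch for plotting the loss curve from a local copy of trainer_state.json (the file path and the choice of matplotlib are assumptions):

```python
import json

import matplotlib.pyplot as plt

# Path is an assumption: point it at a local copy of the trainer_state.json shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries (the final summary entry has no "loss" key).
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("tinyllama-1.1b-mt-sft-full_sardine_2gpus_old")
plt.show()
```
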