mootez committed
Commit 486cd2d · verified · 1 Parent(s): be63c8a

Upload folder using huggingface_hub
checkpoint-12870/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_name_or_path": "/home/mootez/projects/def-tusharma/mootez/local_models/codet5-base",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "bos_token_id": 1,
+   "classifier_dropout": 0.0,
+   "d_ff": 3072,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 2,
+   "feed_forward_proj": "relu",
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-06,
+   "merge_layer_index": 9,
+   "merger_type": "mean",
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.2",
+   "use_cache": true,
+   "vocab_size": 32100
+ }
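
The config above is a standard CodeT5-base (T5) configuration plus two custom keys, "merge_layer_index" and "merger_type", that come from the author's training code rather than from transformers. A minimal loading sketch, assuming the checkpoint-12870 folder has been downloaded locally and that the tokenizer (not part of this upload) is taken from Salesforce/codet5-base:

from transformers import AutoTokenizer, T5Config, T5ForConditionalGeneration

config = T5Config.from_pretrained("checkpoint-12870")  # parses the config.json shown above
model = T5ForConditionalGeneration.from_pretrained("checkpoint-12870", config=config)
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5-base")  # assumption: original CodeT5 tokenizer

# Non-standard keys are typically kept as extra attributes on the config object,
# so the custom merger settings can still be inspected after loading.
print(getattr(config, "merge_layer_index", None), getattr(config, "merger_type", None))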
checkpoint-12870/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.48.2"
+ }
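
This generation config only pins the special-token ids (bos=1, eos=2, pad=0, decoder_start=0); decoding parameters such as beam size are left to the caller. A short usage sketch, reusing the model and tokenizer loaded above, with an illustrative Java snippet as input (the task, per the trainer state below, is Java to C# translation):

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("checkpoint-12870")  # the file shown above
inputs = tokenizer("public int add(int a, int b) { return a + b; }", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=gen_config, max_length=512, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))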
checkpoint-12870/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:284afe79992fa3827161644d3b855b0d3e292b4451a62eed5d21bbee121aca77
+ size 891558696
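
The three lines above are a Git LFS pointer, not the ~892 MB weights file itself. A sketch of resolving it with huggingface_hub; the repo id is a placeholder, not taken from this commit:

from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="mootez/<model-repo>",                       # hypothetical repo id
    filename="checkpoint-12870/model.safetensors",
)
print(local_path)  # cached local path of the resolved LFS object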
checkpoint-12870/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0291001a85efe443475151a6a136cb59bfb944291870741305357f148b8f6b55
+ size 1783272762
checkpoint-12870/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ed77569057dd8e66c3a4cf6eb8dcbf759b0445e3c5870acf04d22085677a02d
+ size 14244
checkpoint-12870/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce1db8d949a216affb89182da9c2ef3ba19d289845b5e378bf1256011fe425ed
+ size 1064
checkpoint-12870/trainer_state.json ADDED
@@ -0,0 +1,232 @@
+ {
+   "best_metric": 0.18614098142337673,
+   "best_model_checkpoint": "./logs/mean_codet5-base_L9_codetrans_123456_src_java_tgt_c_sharp/checkpoint-12870",
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 12870,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.3885003885003885,
+       "grad_norm": 4.147760391235352,
+       "learning_rate": 7.77302759424796e-06,
+       "loss": 2.7004,
+       "step": 1000
+     },
+     {
+       "epoch": 0.777000777000777,
+       "grad_norm": 3.5711517333984375,
+       "learning_rate": 1.554605518849592e-05,
+       "loss": 0.5569,
+       "step": 2000
+     },
+     {
+       "epoch": 1.0,
+       "eval_bleu": 0.179003356061123,
+       "eval_brevity_penalty": 0.20052172529890358,
+       "eval_length_ratio": 0.38360728201434136,
+       "eval_loss": 0.30778825283050537,
+       "eval_precision": [
+         0.9413609884130337,
+         0.9008497972564921,
+         0.8750027253897308,
+         0.8558171317726933
+       ],
+       "eval_reference_length": 122164,
+       "eval_rouge1": 0.5631067571294869,
+       "eval_rouge2": 0.48074444846937914,
+       "eval_rougeL": 0.5612806210060178,
+       "eval_rougeLsum": 0.5619217873625282,
+       "eval_runtime": 18.9342,
+       "eval_samples_per_second": 26.354,
+       "eval_steps_per_second": 1.69,
+       "eval_translation_length": 46863,
+       "step": 2574
+     },
+     {
+       "epoch": 1.1655011655011656,
+       "grad_norm": 2.967615842819214,
+       "learning_rate": 1.9631372210471794e-05,
+       "loss": 0.3833,
+       "step": 3000
+     },
+     {
+       "epoch": 1.554001554001554,
+       "grad_norm": 2.3476781845092773,
+       "learning_rate": 1.8768075279492384e-05,
+       "loss": 0.3027,
+       "step": 4000
+     },
+     {
+       "epoch": 1.9425019425019425,
+       "grad_norm": 2.0748913288116455,
+       "learning_rate": 1.790477834851297e-05,
+       "loss": 0.2798,
+       "step": 5000
+     },
+     {
+       "epoch": 2.0,
+       "eval_bleu": 0.18064475096043295,
+       "eval_brevity_penalty": 0.19515129218839872,
+       "eval_length_ratio": 0.37965358043286074,
+       "eval_loss": 0.21577422320842743,
+       "eval_precision": [
+         0.9603492884864165,
+         0.9310825832043765,
+         0.9128509100524437,
+         0.8994942405810663
+       ],
+       "eval_reference_length": 122164,
+       "eval_rouge1": 0.5917036491654064,
+       "eval_rouge2": 0.5237584687508665,
+       "eval_rougeL": 0.5899047357312865,
+       "eval_rougeLsum": 0.5900052840562033,
+       "eval_runtime": 18.9252,
+       "eval_samples_per_second": 26.367,
+       "eval_steps_per_second": 1.691,
+       "eval_translation_length": 46380,
+       "step": 5148
+     },
+     {
+       "epoch": 2.331002331002331,
+       "grad_norm": 3.4939310550689697,
+       "learning_rate": 1.7041481417533562e-05,
+       "loss": 0.213,
+       "step": 6000
+     },
+     {
+       "epoch": 2.7195027195027195,
+       "grad_norm": 2.666109085083008,
+       "learning_rate": 1.6178184486554153e-05,
+       "loss": 0.2046,
+       "step": 7000
+     },
+     {
+       "epoch": 3.0,
+       "eval_bleu": 0.17965908269115224,
+       "eval_brevity_penalty": 0.19235220844345907,
+       "eval_length_ratio": 0.3775825938901804,
+       "eval_loss": 0.183700829744339,
+       "eval_precision": [
+         0.9631018709215861,
+         0.9383273428596476,
+         0.9230428327682865,
+         0.9123459556352229
+       ],
+       "eval_reference_length": 122164,
+       "eval_rouge1": 0.5990315616712723,
+       "eval_rouge2": 0.5368386673828831,
+       "eval_rougeL": 0.5968963041105507,
+       "eval_rougeLsum": 0.5970830288832117,
+       "eval_runtime": 18.9858,
+       "eval_samples_per_second": 26.283,
+       "eval_steps_per_second": 1.685,
+       "eval_translation_length": 46127,
+       "step": 7722
+     },
+     {
+       "epoch": 3.108003108003108,
+       "grad_norm": 0.23527251183986664,
+       "learning_rate": 1.5314887555574744e-05,
+       "loss": 0.1861,
+       "step": 8000
+     },
+     {
+       "epoch": 3.4965034965034967,
+       "grad_norm": 11.32079029083252,
+       "learning_rate": 1.4451590624595331e-05,
+       "loss": 0.1489,
+       "step": 9000
+     },
+     {
+       "epoch": 3.885003885003885,
+       "grad_norm": 1.5271377563476562,
+       "learning_rate": 1.358829369361592e-05,
+       "loss": 0.1517,
+       "step": 10000
+     },
+     {
+       "epoch": 4.0,
+       "eval_bleu": 0.18288308059259456,
+       "eval_brevity_penalty": 0.1953064675468236,
+       "eval_length_ratio": 0.37976818047870076,
+       "eval_loss": 0.17038071155548096,
+       "eval_precision": [
+         0.9623011596327111,
+         0.9396448414860007,
+         0.9264913208212178,
+         0.9177227877141012
+       ],
+       "eval_reference_length": 122164,
+       "eval_rouge1": 0.6070299892569158,
+       "eval_rouge2": 0.5502100359168347,
+       "eval_rougeL": 0.6057080967298918,
+       "eval_rougeLsum": 0.6048162812836385,
+       "eval_runtime": 18.8925,
+       "eval_samples_per_second": 26.413,
+       "eval_steps_per_second": 1.694,
+       "eval_translation_length": 46394,
+       "step": 10296
+     },
+     {
+       "epoch": 4.273504273504273,
+       "grad_norm": 0.5481866002082825,
+       "learning_rate": 1.272499676263651e-05,
+       "loss": 0.1311,
+       "step": 11000
+     },
+     {
+       "epoch": 4.662004662004662,
+       "grad_norm": 1.118417739868164,
+       "learning_rate": 1.1861699831657098e-05,
+       "loss": 0.1234,
+       "step": 12000
+     },
+     {
+       "epoch": 5.0,
+       "eval_bleu": 0.18614098142337673,
+       "eval_brevity_penalty": 0.20014256077381284,
+       "eval_length_ratio": 0.38332896761730134,
+       "eval_loss": 0.1631670743227005,
+       "eval_precision": [
+         0.9544085929658972,
+         0.9334556442909562,
+         0.9207305099168684,
+         0.912115062207712
+       ],
+       "eval_reference_length": 122164,
+       "eval_rouge1": 0.6014376198086866,
+       "eval_rouge2": 0.5453103253739024,
+       "eval_rougeL": 0.5993123114276371,
+       "eval_rougeLsum": 0.5995004099683573,
+       "eval_runtime": 18.9878,
+       "eval_samples_per_second": 26.28,
+       "eval_steps_per_second": 1.685,
+       "eval_translation_length": 46829,
+       "step": 12870
+     }
+   ],
+   "logging_steps": 1000,
+   "max_steps": 25740,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.561183084224e+16,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
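
trainer_state.json logs training loss/learning rate every 1000 steps and one eval block per epoch (BLEU, ROUGE, loss, n-gram precisions), and it marks checkpoint-12870 (epoch 5 of a planned 10, eval_bleu ~0.186) as the best model so far. A small sketch for pulling the per-epoch eval metrics out of this file:

import json

with open("checkpoint-12870/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_bleu" in entry:  # eval entries only; training steps log loss/lr instead
        print(f"epoch {entry['epoch']:.0f}: "
              f"bleu={entry['eval_bleu']:.4f}, loss={entry['eval_loss']:.4f}, "
              f"rougeL={entry['eval_rougeL']:.4f}")

print("best:", state["best_metric"], "->", state["best_model_checkpoint"])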
checkpoint-12870/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:338d5ab1252a21dbf44688f0562800eb9a4054e5c6501bf606c1c55057ff1f41
+ size 5560
config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_name_or_path": "/home/mootez/projects/def-tusharma/mootez/local_models/codet5-base",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "bos_token_id": 1,
+   "classifier_dropout": 0.0,
+   "d_ff": 3072,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 2,
+   "feed_forward_proj": "relu",
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-06,
+   "merge_layer_index": 9,
+   "merger_type": "mean",
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.2",
+   "use_cache": true,
+   "vocab_size": 32100
+ }
events.out.tfevents.1751375379.gra5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20338cc7a21629bc77ab67c2a7d5c9a66db7b8bb6f485c3abf01eb373575a290
+ size 19672
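
This is the TensorBoard event file written during training (also an LFS pointer). A sketch for reading the logged scalars, assuming the tensorboard package is available; the exact tag names depend on how the Trainer logged them:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("events.out.tfevents.1751375379.gra5")
ea.Reload()
print(ea.Tags()["scalars"])          # e.g. train/loss, eval/bleu (tag names assumed)
for tag in ea.Tags()["scalars"]:
    for event in ea.Scalars(tag):
        print(tag, event.step, event.value)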
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.48.2"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:284afe79992fa3827161644d3b855b0d3e292b4451a62eed5d21bbee121aca77
+ size 891558696
results.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "rouge1": 0.6744308191394062,
+   "rouge2": 0.6272648923596416,
+   "rougeL": 0.6715891042607202,
+   "rougeLsum": 0.6721035285678223,
+   "bleu": 0.5200023942395184,
+   "precision": [
+     0.5384332484015192,
+     0.5223386824731339,
+     0.5131998454180957,
+     0.5065832163091376
+   ],
+   "brevity_penalty": 1.0,
+   "length_ratio": 1.8270932607215793,
+   "translation_length": 416020,
+   "reference_length": 227695
+ }
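
results.json holds the final scores, and its keys closely mirror what the Hugging Face evaluate BLEU and ROUGE metrics return (corpus BLEU with per-order precisions, brevity penalty, length ratio, translation/reference lengths; rouge1/2/L/Lsum). A sketch of how such numbers are typically computed; the prediction/reference pair is a placeholder, and it is an assumption that this is how the file was produced:

import evaluate

bleu = evaluate.load("bleu")
rouge = evaluate.load("rouge")

predictions = ["int Add(int a, int b) { return a + b; }"]          # model outputs (C#)
references = [["public int Add(int a, int b) { return a + b; }"]]  # gold translations

print(bleu.compute(predictions=predictions, references=references))
# -> dict with 'bleu', 'precisions', 'brevity_penalty', 'length_ratio',
#    'translation_length', 'reference_length'
print(rouge.compute(predictions=predictions, references=[r[0] for r in references]))
# -> dict with 'rouge1', 'rouge2', 'rougeL', 'rougeLsum'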
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:338d5ab1252a21dbf44688f0562800eb9a4054e5c6501bf606c1c55057ff1f41
+ size 5560
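
training_args.bin is a pickled TrainingArguments object saved by the Trainer, not a human-readable config. A sketch for inspecting it, assuming a compatible transformers version is installed (recent torch releases require weights_only=False to unpickle arbitrary Python objects):

import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.num_train_epochs, args.learning_rate)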