ydshieh (HF staff) committed
Commit 14a752b
1 Parent(s): fc4d6eb

Saving weights and logs of epoch 2 - step 2700

ckpt_epoch_2_step_2700/config.json ADDED
@@ -0,0 +1,169 @@
+ {
+   "architectures": [
+     "VisionEncoderDecoderModel"
+   ],
+   "bos_token_id": 50256,
+   "decoder": {
+     "_name_or_path": "",
+     "activation_function": "gelu_new",
+     "add_cross_attention": true,
+     "architectures": [
+       "GPT2LMHeadModel"
+     ],
+     "attn_pdrop": 0.1,
+     "bad_words_ids": null,
+     "bos_token_id": 50256,
+     "chunk_size_feed_forward": 0,
+     "decoder_start_token_id": 50256,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "embd_pdrop": 0.1,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 50256,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_epsilon": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "gpt2",
+     "n_ctx": 1024,
+     "n_embd": 768,
+     "n_head": 12,
+     "n_inner": null,
+     "n_layer": 12,
+     "n_positions": 1024,
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 50256,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "resid_pdrop": 0.1,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_attn_weights": true,
+     "sep_token_id": null,
+     "summary_activation": null,
+     "summary_first_dropout": 0.1,
+     "summary_proj_to_labels": true,
+     "summary_type": "cls_index",
+     "summary_use_proj": true,
+     "task_specific_params": {
+       "text-generation": {
+         "do_sample": true,
+         "max_length": 50
+       }
+     },
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.11.0.dev0",
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 50257
+   },
+   "decoder_start_token_id": 50256,
+   "encoder": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": [
+       "ViTModel"
+     ],
+     "attention_probs_dropout_prob": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "vit",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.11.0.dev0",
+     "use_bfloat16": false
+   },
+   "eos_token_id": 50256,
+   "is_encoder_decoder": true,
+   "model_type": "vision-encoder-decoder",
+   "pad_token_id": 50256,
+   "transformers_version": null
+ }
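
A minimal loading sketch (not part of this commit): the config above wires a ViT encoder ("model_type": "vit") to a GPT-2 decoder ("model_type": "gpt2") inside a VisionEncoderDecoderModel. Assuming a transformers release that ships FlaxVisionEncoderDecoderModel and that the checkpoint directory added in this commit is available locally, the saved files could be loaded roughly like this:

# Sketch only: loads the Flax weights (flax_model.msgpack) and the GPT-2
# tokenizer files saved in this checkpoint directory.
from transformers import AutoTokenizer, FlaxVisionEncoderDecoderModel

ckpt_dir = "ckpt_epoch_2_step_2700"
model = FlaxVisionEncoderDecoderModel.from_pretrained(ckpt_dir)
tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)

print(model.config.encoder.model_type)  # "vit"
print(model.config.decoder.model_type)  # "gpt2"
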
ckpt_epoch_2_step_2700/flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dedc5d4987607859fed8b4ce45f5574cf92ecf046d2263b55fcfc12e6bda68d
+ size 956799284
ckpt_epoch_2_step_2700/generation_eval.json ADDED
The diff for this file is too large to render. See raw diff
 
ckpt_epoch_2_step_2700/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
ckpt_epoch_2_step_2700/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>", "pad_token": "<|endoftext|>"}
ckpt_epoch_2_step_2700/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
ckpt_epoch_2_step_2700/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "GPT2Tokenizer"}
ckpt_epoch_2_step_2700/vocab.json ADDED
The diff for this file is too large to render. See raw diff
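
The tokenizer files added above (vocab.json, merges.txt, tokenizer.json, tokenizer_config.json, special_tokens_map.json) describe a standard GPT-2 byte-level BPE tokenizer whose bos/eos/unk/pad tokens are all "<|endoftext|>", matching the 50256 token ids in config.json. A minimal sketch of loading it (an illustration, not part of this commit):

# Sketch only: the pad token is mapped to the eos token, so padded batches
# share id 50256 with end-of-sequence.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("ckpt_epoch_2_step_2700")
print(tokenizer.eos_token, tokenizer.eos_token_id)       # <|endoftext|> 50256
print(tokenizer.pad_token_id == tokenizer.eos_token_id)  # True
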
 
events.out.tfevents.1633443513.t1v-n-bb5dfd23-w-0.8655.0.v2 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f0ee1dffb1fc6d1b8f2c3b81442f8965e1ee6fd6a641dccba47b50fcd601d6a
- size 1590027
+ oid sha256:11e55aaee55ba295b3206dd0ebc81a2a71d40030a982cc3b6be33bba5c0391c4
+ size 1648289
report.txt CHANGED
@@ -256,3 +256,35 @@ Epoch... (2/30 | Step: 2390 | Loss: 2.0162031650543213, Learning Rate: 2.8964455
  Epoch... (2/30 | Step: 2400 | Loss: 1.9704089164733887, Learning Rate: 2.8960121198906563e-05)
  Epoch... (2/30 | Step: 2400 | Loss: 1.9704089164733887, Learning Rate: 2.8960121198906563e-05)
  Epoch... (2/30 | Step: 2400 | Eval Loss: 2.024594306945801 | Eval rouge1: 40.4764 | Eval rouge2: 14.9051 | Eval rougeL: 36.8785 | Eval rougeLsum: 36.8769 | Eval gen_len: 10.8998 |)
+ Epoch... (2/30 | Step: 2410 | Loss: 1.992959976196289, Learning Rate: 2.8955786547157913e-05)
+ Epoch... (2/30 | Step: 2420 | Loss: 1.9835933446884155, Learning Rate: 2.8951451895409264e-05)
+ Epoch... (2/30 | Step: 2430 | Loss: 2.101801872253418, Learning Rate: 2.894711542467121e-05)
+ Epoch... (2/30 | Step: 2440 | Loss: 2.038966178894043, Learning Rate: 2.894278077292256e-05)
+ Epoch... (2/30 | Step: 2450 | Loss: 1.9256877899169922, Learning Rate: 2.8938447940163314e-05)
+ Epoch... (2/30 | Step: 2460 | Loss: 1.9768502712249756, Learning Rate: 2.8934113288414665e-05)
+ Epoch... (2/30 | Step: 2470 | Loss: 1.9429033994674683, Learning Rate: 2.8929778636666015e-05)
+ Epoch... (2/30 | Step: 2480 | Loss: 2.0289816856384277, Learning Rate: 2.8925443984917365e-05)
+ Epoch... (2/30 | Step: 2490 | Loss: 1.983972191810608, Learning Rate: 2.8921109333168715e-05)
+ Epoch... (2/30 | Step: 2500 | Loss: 2.0486741065979004, Learning Rate: 2.8916774681420065e-05)
+ Epoch... (2/30 | Step: 2510 | Loss: 1.9563899040222168, Learning Rate: 2.8912440029671416e-05)
+ Epoch... (2/30 | Step: 2520 | Loss: 2.0897140502929688, Learning Rate: 2.8908103558933362e-05)
+ Epoch... (2/30 | Step: 2530 | Loss: 2.080152988433838, Learning Rate: 2.8903768907184713e-05)
+ Epoch... (2/30 | Step: 2540 | Loss: 1.9517621994018555, Learning Rate: 2.8899436074425466e-05)
+ Epoch... (2/30 | Step: 2550 | Loss: 2.0202088356018066, Learning Rate: 2.8895101422676817e-05)
+ Epoch... (2/30 | Step: 2560 | Loss: 1.9686460494995117, Learning Rate: 2.8890766770928167e-05)
+ Epoch... (2/30 | Step: 2570 | Loss: 1.897174596786499, Learning Rate: 2.8886432119179517e-05)
+ Epoch... (2/30 | Step: 2580 | Loss: 1.9737465381622314, Learning Rate: 2.8882097467430867e-05)
+ Epoch... (2/30 | Step: 2590 | Loss: 2.0247108936309814, Learning Rate: 2.8877762815682217e-05)
+ Epoch... (2/30 | Step: 2600 | Loss: 2.019479990005493, Learning Rate: 2.8873428163933568e-05)
+ Epoch... (2/30 | Step: 2610 | Loss: 1.921512246131897, Learning Rate: 2.8869093512184918e-05)
+ Epoch... (2/30 | Step: 2620 | Loss: 2.002574920654297, Learning Rate: 2.8864757041446865e-05)
+ Epoch... (2/30 | Step: 2630 | Loss: 1.9710190296173096, Learning Rate: 2.886042420868762e-05)
+ Epoch... (2/30 | Step: 2640 | Loss: 2.0847268104553223, Learning Rate: 2.885608955693897e-05)
+ Epoch... (2/30 | Step: 2650 | Loss: 1.9489305019378662, Learning Rate: 2.885175490519032e-05)
+ Epoch... (2/30 | Step: 2660 | Loss: 1.9855422973632812, Learning Rate: 2.884742025344167e-05)
+ Epoch... (2/30 | Step: 2670 | Loss: 2.001290798187256, Learning Rate: 2.884308560169302e-05)
+ Epoch... (2/30 | Step: 2680 | Loss: 1.9203612804412842, Learning Rate: 2.883875094994437e-05)
+ Epoch... (2/30 | Step: 2690 | Loss: 1.928293228149414, Learning Rate: 2.883441629819572e-05)
+ Epoch... (2/30 | Step: 2700 | Loss: 2.0274925231933594, Learning Rate: 2.883008164644707e-05)
+ Epoch... (2/30 | Step: 2700 | Loss: 2.0274925231933594, Learning Rate: 2.883008164644707e-05)
+ Epoch... (2/30 | Step: 2700 | Eval Loss: 2.009991407394409 | Eval rouge1: 40.5888 | Eval rouge2: 15.0982 | Eval rougeL: 36.9127 | Eval rougeLsum: 36.9165 | Eval gen_len: 10.6976 |)
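
The report above logs training loss every 10 steps and, at step 2700, an evaluation pass (Eval Loss 2.0100, ROUGE-1 40.59, ROUGE-2 15.10, ROUGE-L 36.91, mean generated length 10.70). A hedged sketch of how captions might be generated from this checkpoint for such an evaluation; ViTImageProcessor, the google/vit-base-patch16-224-in21k processor source, and example.jpg are assumptions, since this commit does not include an image preprocessor config:

# Sketch only: greedy generation of a caption for a single image.
from PIL import Image
from transformers import AutoTokenizer, FlaxVisionEncoderDecoderModel, ViTImageProcessor

ckpt_dir = "ckpt_epoch_2_step_2700"
model = FlaxVisionEncoderDecoderModel.from_pretrained(ckpt_dir)
tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")  # assumed processor source

pixel_values = processor(images=Image.open("example.jpg"), return_tensors="np").pixel_values
output_ids = model.generate(pixel_values, max_length=20).sequences
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))
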