aleksahet committed on
Commit a505282 · 1 Parent(s): 3984fc7

Upload 9 files

config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "_name_or_path": "classla/bcms-bertic",
+ "architectures": [
+ "ElectraForQuestionAnswering"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "embedding_size": 768,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "electra",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "summary_activation": "gelu",
+ "summary_last_dropout": 0.1,
+ "summary_type": "first",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.30.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
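
The config above is classla/bcms-bertic fine-tuned with an ElectraForQuestionAnswering head. As a minimal sketch, the uploaded files could be loaded with transformers roughly as follows; the local ./checkpoint path is an assumption, and the model weights themselves (e.g. pytorch_model.bin) are not among the nine files in this commit, so they would need to be present alongside these files:

from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Assumed local directory holding config.json, the tokenizer files,
# and the model weights (the weights are not part of this commit).
checkpoint_dir = "./checkpoint"

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = AutoModelForQuestionAnswering.from_pretrained(checkpoint_dir)
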
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5adfa149ab20a66653f4f028a319b701301a618a44f3b019f68764ed6cebcf53
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba2739a46f2485136d8438882e8cfcc3c657672abd33abdfef86aeb67d6de82f
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "mask_token": "[MASK]",
+ "max_len": 512,
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": false,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "ElectraTokenizer",
+ "unk_token": "[UNK]"
+ }
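
tokenizer_config.json declares a cased ElectraTokenizer with a 512-token model_max_length. A rough sketch of encoding a question/context pair for the QA head, using the same assumed ./checkpoint directory as above and placeholder strings:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")  # assumed local path

# Pairs are packed as [CLS] question [SEP] context [SEP] and truncated
# to the 512-token limit set in tokenizer_config.json.
enc = tokenizer(
    "Ko je napisao ovaj tekst?",    # placeholder question
    "Ovaj tekst je samo primjer.",  # placeholder context
    truncation=True,
    max_length=512,
    return_tensors="pt",
)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"][0]))
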
trainer_state.json ADDED
@@ -0,0 +1,128 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.0,
+ "global_step": 16290,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.37,
+ "learning_rate": 2.6316758747697975e-05,
+ "loss": 1.7533,
+ "step": 2000
+ },
+ {
+ "epoch": 0.37,
+ "eval_loss": 1.023402452468872,
+ "eval_runtime": 20.2636,
+ "eval_samples_per_second": 57.69,
+ "eval_steps_per_second": 3.652,
+ "step": 2000
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 2.2633517495395946e-05,
+ "loss": 1.2356,
+ "step": 4000
+ },
+ {
+ "epoch": 0.74,
+ "eval_loss": 0.9132137298583984,
+ "eval_runtime": 20.2624,
+ "eval_samples_per_second": 57.693,
+ "eval_steps_per_second": 3.652,
+ "step": 4000
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 1.8950276243093924e-05,
+ "loss": 1.0926,
+ "step": 6000
+ },
+ {
+ "epoch": 1.1,
+ "eval_loss": 0.9214878082275391,
+ "eval_runtime": 20.2606,
+ "eval_samples_per_second": 57.698,
+ "eval_steps_per_second": 3.652,
+ "step": 6000
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 1.52670349907919e-05,
+ "loss": 0.8857,
+ "step": 8000
+ },
+ {
+ "epoch": 1.47,
+ "eval_loss": 0.948037326335907,
+ "eval_runtime": 20.263,
+ "eval_samples_per_second": 57.691,
+ "eval_steps_per_second": 3.652,
+ "step": 8000
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.1583793738489871e-05,
+ "loss": 0.8726,
+ "step": 10000
+ },
+ {
+ "epoch": 1.84,
+ "eval_loss": 0.9162865877151489,
+ "eval_runtime": 20.2461,
+ "eval_samples_per_second": 57.739,
+ "eval_steps_per_second": 3.655,
+ "step": 10000
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 7.900552486187846e-06,
+ "loss": 0.7355,
+ "step": 12000
+ },
+ {
+ "epoch": 2.21,
+ "eval_loss": 0.9980245232582092,
+ "eval_runtime": 20.2431,
+ "eval_samples_per_second": 57.748,
+ "eval_steps_per_second": 3.656,
+ "step": 12000
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 4.21731123388582e-06,
+ "loss": 0.6529,
+ "step": 14000
+ },
+ {
+ "epoch": 2.58,
+ "eval_loss": 1.0005152225494385,
+ "eval_runtime": 20.2501,
+ "eval_samples_per_second": 57.728,
+ "eval_steps_per_second": 3.654,
+ "step": 14000
+ },
+ {
+ "epoch": 2.95,
+ "learning_rate": 5.340699815837937e-07,
+ "loss": 0.6474,
+ "step": 16000
+ },
+ {
+ "epoch": 2.95,
+ "eval_loss": 0.9974539875984192,
+ "eval_runtime": 20.2537,
+ "eval_samples_per_second": 57.718,
+ "eval_steps_per_second": 3.654,
+ "step": 16000
+ }
+ ],
+ "max_steps": 16290,
+ "num_train_epochs": 3,
+ "total_flos": 6.809654777296896e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
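
trainer_state.json is plain JSON, so the logged training curve can be read back directly. A small sketch that pulls the eval_loss entries out of log_history, assuming the file sits in the working directory:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry "eval_loss"; the interleaved training entries carry "loss".
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>6}: eval_loss = {entry['eval_loss']:.4f}")

On this run the lowest logged eval_loss is about 0.913 at step 4000; later evaluations drift back toward 1.0 while the training loss keeps falling, and with best_model_checkpoint left null no metric-based checkpoint selection appears to have been configured.
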
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a24233c5d4b4c090c078c9c822493abacc04e7e6507299466dc59b3499294e95
+ size 3899
vocab.txt ADDED
The diff for this file is too large to render. See raw diff