stefan-it committed
Commit d4a1581
Parent: b656265

model/tokenizer: add initial version
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<end>": 32001,
+   "<start>": 32000
+ }
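These two tokens are the SpanMarker entity-boundary markers, appended after the encoder's original 32,000-entry vocabulary (hence "vocab_size": 32002 in config.json below). A minimal sketch to confirm the mapping once the checkpoint is published; the repo id "gwlms/span-marker-teams-germeval" is a placeholder, not something this commit confirms:

    # Placeholder repo id -- substitute the actual model repo.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gwlms/span-marker-teams-germeval")
    print(tokenizer.convert_tokens_to_ids("<start>"))  # expected: 32000
    print(tokenizer.convert_tokens_to_ids("<end>"))    # expected: 32001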
config.json ADDED
@@ -0,0 +1,206 @@
+ {
+   "architectures": [
+     "SpanMarkerModel"
+   ],
+   "encoder": {
+     "_name_or_path": "gwlms/teams-base-dewiki-v1-discriminator",
+     "add_cross_attention": false,
+     "architectures": [
+       "ElectraForPreTraining"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "embedding_size": 768,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "O",
+       "1": "B-LOC",
+       "2": "I-LOC",
+       "3": "B-LOCderiv",
+       "4": "I-LOCderiv",
+       "5": "B-LOCpart",
+       "6": "I-LOCpart",
+       "7": "B-ORG",
+       "8": "I-ORG",
+       "9": "B-ORGderiv",
+       "10": "I-ORGderiv",
+       "11": "B-ORGpart",
+       "12": "I-ORGpart",
+       "13": "B-OTH",
+       "14": "I-OTH",
+       "15": "B-OTHderiv",
+       "16": "I-OTHderiv",
+       "17": "B-OTHpart",
+       "18": "I-OTHpart",
+       "19": "B-PER",
+       "20": "I-PER",
+       "21": "B-PERderiv",
+       "22": "I-PERderiv",
+       "23": "B-PERpart",
+       "24": "I-PERpart"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "B-LOC": 1,
+       "B-LOCderiv": 3,
+       "B-LOCpart": 5,
+       "B-ORG": 7,
+       "B-ORGderiv": 9,
+       "B-ORGpart": 11,
+       "B-OTH": 13,
+       "B-OTHderiv": 15,
+       "B-OTHpart": 17,
+       "B-PER": 19,
+       "B-PERderiv": 21,
+       "B-PERpart": 23,
+       "I-LOC": 2,
+       "I-LOCderiv": 4,
+       "I-LOCpart": 6,
+       "I-ORG": 8,
+       "I-ORGderiv": 10,
+       "I-ORGpart": 12,
+       "I-OTH": 14,
+       "I-OTHderiv": 16,
+       "I-OTHpart": 18,
+       "I-PER": 20,
+       "I-PERderiv": 22,
+       "I-PERpart": 24,
+       "O": 0
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "electra",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "summary_activation": "gelu",
+     "summary_last_dropout": 0.1,
+     "summary_type": "first",
+     "summary_use_proj": true,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "float32",
+     "torchscript": false,
+     "transformers_version": "4.31.0",
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 32002
+   },
+   "entity_max_length": 8,
+   "id2label": {
+     "0": "O",
+     "1": "LOC",
+     "2": "LOCderiv",
+     "3": "LOCpart",
+     "4": "ORG",
+     "5": "ORGderiv",
+     "6": "ORGpart",
+     "7": "OTH",
+     "8": "OTHderiv",
+     "9": "OTHpart",
+     "10": "PER",
+     "11": "PERderiv",
+     "12": "PERpart"
+   },
+   "id2reduced_id": {
+     "0": 0,
+     "1": 1,
+     "2": 1,
+     "3": 2,
+     "4": 2,
+     "5": 3,
+     "6": 3,
+     "7": 4,
+     "8": 4,
+     "9": 5,
+     "10": 5,
+     "11": 6,
+     "12": 6,
+     "13": 7,
+     "14": 7,
+     "15": 8,
+     "16": 8,
+     "17": 9,
+     "18": 9,
+     "19": 10,
+     "20": 10,
+     "21": 11,
+     "22": 11,
+     "23": 12,
+     "24": 12
+   },
+   "label2id": {
+     "LOC": 1,
+     "LOCderiv": 2,
+     "LOCpart": 3,
+     "O": 0,
+     "ORG": 4,
+     "ORGderiv": 5,
+     "ORGpart": 6,
+     "OTH": 7,
+     "OTHderiv": 8,
+     "OTHpart": 9,
+     "PER": 10,
+     "PERderiv": 11,
+     "PERpart": 12
+   },
+   "marker_max_length": 128,
+   "max_next_context": null,
+   "max_prev_context": null,
+   "model_max_length": 256,
+   "model_max_length_default": 512,
+   "model_type": "span-marker",
+   "span_marker_version": "1.2.4",
+   "torch_dtype": "float32",
+   "trained_with_document_context": false,
+   "transformers_version": "4.31.0",
+   "vocab_size": 32002
+ }
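The config wraps a SpanMarkerModel head (span_marker 1.2.4) around the gwlms/teams-base-dewiki-v1-discriminator ELECTRA encoder: the encoder keeps the 25-tag BIO scheme, which "id2reduced_id" collapses to 13 span-level classes (O plus LOC/ORG/OTH/PER and their -deriv/-part variants, i.e. the GermEval 2014 NER tag set). A sketch of loading and querying the checkpoint with the span_marker library; the repo id is again a placeholder:

    # Placeholder repo id; requires span_marker>=1.2.4 per "span_marker_version".
    from span_marker import SpanMarkerModel

    model = SpanMarkerModel.from_pretrained("gwlms/span-marker-teams-germeval")
    entities = model.predict("Angela Merkel besuchte die Humboldt-Universität in Berlin.")
    for entity in entities:
        # predict() returns dicts with "span", "label", "score" and char offsets
        print(entity["span"], entity["label"], round(entity["score"], 3))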
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:442b64d2bae474c8ed2cb693f0a1b655e6044458645d3dbbbf556b0082780030
+ size 440261353
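The weights are tracked with Git LFS, so the committed file is only a pointer; the oid and size identify the actual ~440 MB float32 checkpoint. A quick way to check a downloaded copy against the pointer above:

    # Verify a downloaded pytorch_model.bin against the LFS pointer.
    import hashlib
    import os

    EXPECTED_SHA256 = "442b64d2bae474c8ed2cb693f0a1b655e6044458645d3dbbbf556b0082780030"

    assert os.path.getsize("pytorch_model.bin") == 440261353, "size mismatch"
    digest = hashlib.sha256()
    with open("pytorch_model.bin", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    assert digest.hexdigest() == EXPECTED_SHA256, "checksum mismatch"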
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "add_prefix_space": true,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "entity_max_length": 8,
+   "marker_max_length": 128,
+   "mask_token": "[MASK]",
+   "max_len": 512,
+   "model_max_length": 256,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "ElectraTokenizer",
+   "unk_token": "[UNK]"
+ }
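The config pins a cased ElectraTokenizer ("do_lower_case": false) and caps "model_max_length" at 256, consistent with reserving 2 × "marker_max_length" = 256 of the encoder's 512 positions for the start/end markers. A short sanity check of those settings (placeholder repo id once more):

    # Placeholder repo id -- sanity-check the tokenizer settings above.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gwlms/span-marker-teams-germeval")
    assert tokenizer.model_max_length == 256
    assert not tokenizer.do_lower_case  # cased German model
    print(tokenizer.tokenize("Die Deutsche Bahn fährt nach München."))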
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a426129dcff8b96d4cad92ec44e66b9f709021a837605bb1794c4959603712d8
+ size 4027
vocab.txt ADDED
The diff for this file is too large to render. See raw diff