Update Entityrecongnitionjobs.py
Entityrecongnitionjobs.py  CHANGED  +175 -174
@@ -67,161 +67,183 @@ class Conll2003Config(datasets.BuilderConfig):
        super(Conll2003Config, self).__init__(**kwargs)


class Conll2003(datasets.GeneratorBasedBuilder):

    """Conll2003 dataset."""

    BUILDER_CONFIGS = [
        Conll2003Config(name="Entityrecongnitionjobs", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                                "B-MET",
                                "I-MET",
                                "B-CONT",
                                "I-CONT",
                                "B-EDU",
                                "I-EDU",
                                "B-CER",
                                "I-CER",
                                "B-EXP",
                                "I-EXP",
                                "B-SAL",
                                "I-SAL",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    ner_tags.append(splits[3].rstrip())
            # last example
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }
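For reference, `_generate_examples` expects the usual CoNLL-2003 layout: one token per line with space-separated POS, chunk, and NER columns, sentences separated by blank lines, and optional `-DOCSTART-` markers. The fragment below is illustrative only; the tokens and tags are made up and not taken from the actual data files referenced by `_URL`:

    -DOCSTART- -X- -X- O

    John NNP B-NP B-PER
    works VBZ B-VP O
    at IN B-PP O
    Acme NNP B-NP B-ORG
    . . O O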
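Once the script is in place, the builder can be exercised end to end with `datasets.load_dataset`. A minimal sketch, assuming the script lives at `./Entityrecongnitionjobs.py` and that the archive at `_URL` is reachable (the local path and the printed output are assumptions, not part of this commit):

    from datasets import load_dataset

    # Load through the custom builder script; recent versions of `datasets`
    # may additionally require trust_remote_code=True for script-based datasets.
    ds = load_dataset("./Entityrecongnitionjobs.py", name="Entityrecongnitionjobs")

    train = ds["train"]
    print(train.features["ner_tags"].feature.names)  # ClassLabel vocabulary

    # Convert the integer labels of the first example back to tag strings.
    example = train[0]
    tags = [train.features["ner_tags"].feature.int2str(i) for i in example["ner_tags"]]
    print(list(zip(example["tokens"], tags)))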