Geawher committed on
Commit
66610db
1 Parent(s): 1db603b

Update Entityrecongnitionjobs.py

Browse files
Files changed (1) hide show
  1. Entityrecongnitionjobs.py +175 -174
Entityrecongnitionjobs.py CHANGED
@@ -67,161 +67,183 @@ class Conll2003Config(datasets.BuilderConfig):
67
  super(Conll2003Config, self).__init__(**kwargs)
68
 
69
 
70
- #class Conll2003(datasets.GeneratorBasedBuilder):
 
71
  """Conll2003 dataset."""
72
-
73
- BUILDER_CONFIGS = [
74
- Conll2003Config(name="Entityrecongnitionjobs", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
75
- ]
76
-
77
- def _info(self):
78
- return datasets.DatasetInfo(
79
- description=_DESCRIPTION,
80
- features=datasets.Features(
81
- {
82
- "id": datasets.Value("string"),
83
- "tokens": datasets.Sequence(datasets.Value("string")),
84
- "pos_tags": datasets.Sequence(
85
- datasets.features.ClassLabel(
86
- names=[
87
- '"',
88
- "''",
89
- "#",
90
- "$",
91
- "(",
92
- ")",
93
- ",",
94
- ".",
95
- ":",
96
- "``",
97
- "CC",
98
- "CD",
99
- "DT",
100
- "EX",
101
- "FW",
102
- "IN",
103
- "JJ",
104
- "JJR",
105
- "JJS",
106
- "LS",
107
- "MD",
108
- "NN",
109
- "NNP",
110
- "NNPS",
111
- "NNS",
112
- "NN|SYM",
113
- "PDT",
114
- "POS",
115
- "PRP",
116
- "PRP$",
117
- "RB",
118
- "RBR",
119
- "RBS",
120
- "RP",
121
- "SYM",
122
- "TO",
123
- "UH",
124
- "VB",
125
- "VBD",
126
- "VBG",
127
- "VBN",
128
- "VBP",
129
- "VBZ",
130
- "WDT",
131
- "WP",
132
- "WP$",
133
- "WRB",
134
- ]
135
- )
136
- ),
137
- "chunk_tags": datasets.Sequence(
138
- datasets.features.ClassLabel(
139
- names=[
140
- "O",
141
- "B-ADJP",
142
- "I-ADJP",
143
- "B-ADVP",
144
- "I-ADVP",
145
- "B-CONJP",
146
- "I-CONJP",
147
- "B-INTJ",
148
- "I-INTJ",
149
- "B-LST",
150
- "I-LST",
151
- "B-NP",
152
- "I-NP",
153
- "B-PP",
154
- "I-PP",
155
- "B-PRT",
156
- "I-PRT",
157
- "B-SBAR",
158
- "I-SBAR",
159
- "B-UCP",
160
- "I-UCP",
161
- "B-VP",
162
- "I-VP",
163
- ]
164
- )
165
- ),
166
- "ner_tags": datasets.Sequence(
167
- datasets.features.ClassLabel(
168
- names=[
169
- "O",
170
- "B-PER",
171
- "I-PER",
172
- "B-ORG",
173
- "I-ORG",
174
- "B-LOC",
175
- "I-LOC",
176
- "B-MISC",
177
- "I-MISC",
178
- "B-MET",
179
- "I-MET",
180
- "B-CONT",
181
- "I-CONT",
182
- "B-EDU",
183
- "I-EDU",
184
- "B-CER",
185
- "I-CER",
186
- "B-EXP",
187
- "I-EXP",
188
- "B-SAL",
189
- "I-SAL",
190
- ]
191
- )
 
 
192
  ),
 
 
 
 
 
 
 
 
 
 
 
 
193
  }
194
- ),
195
- supervised_keys=None,
196
- homepage="",
197
- citation=_CITATION,
198
- )
199
-
200
- def _split_generators(self, dl_manager):
201
- """Returns SplitGenerators."""
202
- downloaded_file = dl_manager.download_and_extract(_URL)
203
- data_files = {
204
- "train": os.path.join(downloaded_file, _TRAINING_FILE),
205
- "dev": os.path.join(downloaded_file, _DEV_FILE),
206
- "test": os.path.join(downloaded_file, _TEST_FILE),
207
- }
208
-
209
- return [
210
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
211
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
212
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
213
- ]
214
-
215
- def _generate_examples(self, filepath):
216
- logger.info(" Generating examples from = %s", filepath)
217
- with open(filepath, encoding="utf-8") as f:
218
- guid = 0
219
- tokens = []
220
- pos_tags = []
221
- chunk_tags = []
222
- ner_tags = []
223
- for line in f:
224
- if line.startswith("-DOCSTART-") or line == "" or line == "\n":
 
 
 
 
 
 
 
225
  if tokens:
226
  yield guid, {
227
  "id": str(guid),
@@ -229,25 +251,4 @@ def _generate_examples(self, filepath):
229
  "pos_tags": pos_tags,
230
  "chunk_tags": chunk_tags,
231
  "ner_tags": ner_tags,
232
- }
233
- guid += 1
234
- tokens = []
235
- pos_tags = []
236
- chunk_tags = []
237
- ner_tags = []
238
- else:
239
- # conll2003 tokens are space separated
240
- splits = line.split(" ")
241
- tokens.append(splits[0])
242
- pos_tags.append(splits[1])
243
- chunk_tags.append(splits[2])
244
- ner_tags.append(splits[3].rstrip())
245
- # last example
246
- if tokens:
247
- yield guid, {
248
- "id": str(guid),
249
- "tokens": tokens,
250
- "pos_tags": pos_tags,
251
- "chunk_tags": chunk_tags,
252
- "ner_tags": ner_tags,
253
- }
 
67
  super(Conll2003Config, self).__init__(**kwargs)
68
 
69
 
70
class Conll2003(datasets.GeneratorBasedBuilder):
    """CoNLL-2003-style NER dataset builder for job-posting entities.

    Follows the standard CoNLL-2003 loading-script layout: each data file is
    whitespace-column formatted (token, POS tag, chunk tag, NER tag per line),
    sentences are separated by blank lines, and documents start with a
    ``-DOCSTART-`` marker line. The NER label set extends the classic
    PER/ORG/LOC/MISC tags with job-domain entities (MET, CONT, EDU, CER,
    EXP, SAL).
    """

    BUILDER_CONFIGS = [
        # NOTE(review): "Entityrecongnitionjobs" (misspelled) is the published
        # config name — do not "fix" the spelling, callers select it by name.
        Conll2003Config(
            name="Entityrecongnitionjobs",
            version=datasets.Version("1.0.0"),
            description="Conll2003 dataset",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing the feature schema.

        The integer ids of each ClassLabel are fixed by list position, so the
        tag orderings below must never be reordered once data is published.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Penn-Treebank POS tag inventory (CoNLL-2003 standard).
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    # IOB2 syntactic-chunk tags (CoNLL-2000/2003 standard).
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    # IOB2 NER tags: classic CoNLL-2003 entities plus
                    # job-posting-specific ones (MET, CONT, EDU, CER, EXP, SAL).
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                                "B-MET",
                                "I-MET",
                                "B-CONT",
                                "I-CONT",
                                "B-EDU",
                                "I-EDU",
                                "B-CER",
                                "I-CER",
                                "B-EXP",
                                "I-EXP",
                                "B-SAL",
                                "I-SAL",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads/extracts the archive at ``_URL`` and maps the three split
        files (``_TRAINING_FILE``/``_DEV_FILE``/``_TEST_FILE``) to the
        standard train/validation/test splits.
        """
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs parsed from one CoNLL-format file.

        Sentences are accumulated token-by-token and flushed on blank lines
        or ``-DOCSTART-`` markers; a trailing sentence without a terminating
        blank line is flushed after the loop.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: emit the accumulated example, if any.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated:
                    # token POS chunk NER (NER carries the trailing newline).
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    ner_tags.append(splits[3].rstrip())
            # last example (file may end without a trailing blank line)
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }