system HF staff committed on
Commit aee85ce • 1 Parent(s): e998e45

Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2):
  1. README.md +1 -0
  2. polyglot_ner.py +49 -58
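
The substantive change is in polyglot_ner.py: instead of extracting the whole archive to disk with download_and_extract and reading files at hard-coded paths, the loader now downloads the archive and walks it lazily with dl_manager.iter_archive, which yields (path_inside_archive, binary_file_object) pairs, so each row has to be decoded from bytes, as the diff below shows. A minimal, self-contained sketch of that contract, approximated here with tarfile rather than the library's own download manager (iter_archive_like and the local archive name are illustrative placeholders; the member layout mirrors the removed _LANG_FILEPATHS mapping):

import tarfile

def iter_archive_like(archive_path):
    """Yield (member_path, binary_file_object) pairs, roughly the contract
    dl_manager.iter_archive provides, without extracting the tar to disk."""
    with tarfile.open(archive_path) as tar:
        for member in tar.getmembers():
            if member.isfile():
                f = tar.extractfile(member)
                if f is not None:
                    yield member.name, f

# Hypothetical local copy of the data archive; per the removed _LANG_FILEPATHS
# mapping, members look like acl_datasets/<lang>/data/<lang>_wiki.conll
# (acl_datasets/zh/zh_wiki.conll for Chinese).
for path, f in iter_archive_like("polyglot-ner.tar.gz"):
    if path.endswith("_wiki.conll"):
        lang = path.split("/")[1]
        first_row = next(f).decode("utf-8").rstrip()  # rows arrive as bytes, hence .decode()
        print(lang, first_row.split("\t"))            # token <TAB> NER label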
README.md CHANGED
@@ -1,4 +1,5 @@
  ---
+ pretty_name: Polyglot-NER
  paperswithcode_id: polyglot-ner
  ---
 
polyglot_ner.py CHANGED
@@ -16,9 +16,6 @@
  # Lint as: python3
  """The Polyglot-NER Dataset."""

-
- import os
-
  import datasets


@@ -76,16 +73,6 @@ _LANGUAGES = [
      "uk",
  ]

- _LANG_FILEPATHS = {
-     lang: os.path.join(
-         "acl_datasets",
-         lang,
-         "data" if lang != "zh" else "",  # they're all lang/data/lang_wiki.conll except "zh"
-         f"{lang}_wiki.conll",
-     )
-     for lang in _LANGUAGES
- }
-
  _DESCRIPTION = """\
  Polyglot-NER
  A training dataset automatically generated from Wikipedia and Freebase the task
@@ -107,10 +94,7 @@ class PolyglotNERConfig(datasets.BuilderConfig):
      def __init__(self, *args, languages=None, **kwargs):
          super().__init__(*args, version=datasets.Version(_VERSION, ""), **kwargs)
          self.languages = languages
-
-     @property
-     def filepaths(self):
-         return [_LANG_FILEPATHS[lang] for lang in self.languages]
+         assert all(lang in _LANGUAGES for lang in languages), f"Invalid languages. Please use a subset of {_LANGUAGES}"


  class PolyglotNER(datasets.GeneratorBasedBuilder):
@@ -145,47 +129,54 @@ class PolyglotNER(datasets.GeneratorBasedBuilder):

      def _split_generators(self, dl_manager):
          """Returns SplitGenerators."""
-         path = dl_manager.download_and_extract(_DATA_URL)
+         archive = dl_manager.download(_DATA_URL)

-         return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"datapath": path})]
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive)})
+         ]

-     def _generate_examples(self, datapath):
+     def _generate_examples(self, files):
+         languages = list(self.config.languages)
          sentence_counter = 0
-         for filepath, lang in zip(self.config.filepaths, self.config.languages):
-             filepath = os.path.join(datapath, filepath)
-             with open(filepath, encoding="utf-8") as f:
-                 current_words = []
-                 current_ner = []
-                 for row in f:
-                     row = row.rstrip()
-                     if row:
-                         token, label = row.split("\t")
-                         current_words.append(token)
-                         current_ner.append(label)
-                     else:
-                         # New sentence
-                         if not current_words:
-                             # Consecutive empty lines will cause empty sentences
-                             continue
-                         assert len(current_words) == len(current_ner), "💔 between len of words & ner"
-                         sentence = (
-                             sentence_counter,
-                             {
-                                 "id": str(sentence_counter),
-                                 "lang": lang,
-                                 "words": current_words,
-                                 "ner": current_ner,
-                             },
-                         )
-                         sentence_counter += 1
-                         current_words = []
-                         current_ner = []
-                         yield sentence
-                 # Don't forget last sentence in dataset 🧐
-                 if current_words:
-                     yield sentence_counter, {
-                         "id": str(sentence_counter),
-                         "lang": lang,
-                         "words": current_words,
-                         "ner": current_ner,
-                     }
+         for path, f in files:
+             if not languages:
+                 break
+             if path.endswith("_wiki.conll"):
+                 lang = path.split("/")[1]
+                 if lang in languages:
+                     languages.remove(lang)
+                     current_words = []
+                     current_ner = []
+                     for row in f:
+                         row = row.decode("utf-8").rstrip()
+                         if row:
+                             token, label = row.split("\t")
+                             current_words.append(token)
+                             current_ner.append(label)
+                         else:
+                             # New sentence
+                             if not current_words:
+                                 # Consecutive empty lines will cause empty sentences
+                                 continue
+                             assert len(current_words) == len(current_ner), "💔 between len of words & ner"
+                             sentence = (
+                                 sentence_counter,
+                                 {
+                                     "id": str(sentence_counter),
+                                     "lang": lang,
+                                     "words": current_words,
+                                     "ner": current_ner,
+                                 },
+                             )
+                             sentence_counter += 1
+                             current_words = []
+                             current_ner = []
+                             yield sentence
+                     # Don't forget last sentence in dataset 🧐
+                     if current_words:
+                         yield sentence_counter, {
+                             "id": str(sentence_counter),
+                             "lang": lang,
+                             "words": current_words,
+                             "ner": current_ner,
+                         }
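
For completeness, a hedged sketch of how the updated loader would typically be consumed. The per-language config names (for example "en") are an assumption based on the language codes in _LANGUAGES rather than something shown in this diff, and the dataset id polyglot_ner is assumed to be the canonical one; the streaming call is the use case that the switch from download_and_extract() to download() plus iter_archive() is meant to enable:

from datasets import load_dataset

# Regular mode: the archive is downloaded once and parsed lazily by the loader above.
ds = load_dataset("polyglot_ner", "en", split="train")
print(ds[0])  # e.g. {"id": "0", "lang": "en", "words": [...], "ner": [...]}

# Streaming mode avoids materializing the extracted archive locally; this is what
# iterating the archive members in _generate_examples makes possible.
streamed = load_dataset("polyglot_ner", "en", split="train", streaming=True)
print(next(iter(streamed)))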