jfrenz committed on
Commit
5123818
1 Parent(s): 536bc16

Fixed dataset_info generation

Files changed (2)
  1. dataset_infos.json +1 -0
  2. legalglue.py +16 -15
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"german_ler": {"description": "description", "citation": "@inproceedings{leitner2019fine,\nauthor = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},\ntitle = {{Fine-grained Named Entity Recognition in Legal Documents}},\nbooktitle = {Semantic Systems. The Power of AI and Knowledge\n Graphs. Proceedings of the 15th International Conference\n (SEMANTiCS 2019)},\nyear = 2019,\neditor = {Maribel Acosta and Philippe Cudr\u00e9-Mauroux and Maria\n Maleshkova and Tassilo Pellegrini and Harald Sack and York\n Sure-Vetter},\nkeywords = {aip},\npublisher = {Springer},\nseries = {Lecture Notes in Computer Science},\nnumber = {11702},\naddress = {Karlsruhe, Germany},\nmonth = 9,\nnote = {10/11 September 2019},\npages = {272--287},\npdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}\n", "homepage": "https://github.com/elenanereiss/Legal-Entity-Recognition", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 39, "names": ["B-AN", "B-EUN", "B-GRT", "B-GS", "B-INN", "B-LD", "B-LDS", "B-LIT", "B-MRK", "B-ORG", "B-PER", "B-RR", "B-RS", "B-ST", "B-STR", "B-UN", "B-VO", "B-VS", "B-VT", "I-AN", "I-EUN", "I-GRT", "I-GS", "I-INN", "I-LD", "I-LDS", "I-LIT", "I-MRK", "I-ORG", "I-PER", "I-RR", "I-RS", "I-ST", "I-STR", "I-UN", "I-VO", "I-VS", "I-VT", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "legal_glue", "config_name": "german_ler", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38854047, "num_examples": 66730, "dataset_name": "legal_glue"}}, "download_checksums": {"https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip": {"num_bytes": 4392913, "checksum": "f0427df5fb8bfdefe5228bc0fa0e75e9cfa782d1a78e32582cce096473c88567"}}, "download_size": 4392913, "post_processing_size": null, "dataset_size": 38854047, "size_in_bytes": 43246960}}
legalglue.py CHANGED
@@ -152,17 +152,17 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         #archive = dl_manager.download(self.config.data_url)
-        if self.config_name == "german_ler":
+        if self.config.name == "german_ler":
             archive = dl_manager.download_and_extract(self.config.data_url)
-            return datasets.SplitGenerator(
+            return [datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": [os.path.join(archive, file) for file in self.config.data_files],
+                    "filepath": self.config.data_files,
                     "split": "train",
-                    #"files": dl_manager.iter_archive(archive),
+                    "files": [os.path.join(archive, file) for file in self.config.data_files],  # dl_manager.iter_archive(archive)
                 },
-            )
+            )]
         # else:
         #     return [
         #         datasets.SplitGenerator(
@@ -194,11 +194,12 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
         #     ),
         # ]
 
-    def _generate_examples(self, filepath, split):
-        if self.config_name == "german_ler":
+    def _generate_examples(self, filepath, split, files):
+        if self.config.name == "german_ler":
             texts, labels = [], []
-            for path in filepath:
-                with open(path, encoding="utf-8") as f:
+            for file in files:
+                #if path in filepath:
+                with open(file, encoding="utf-8") as f:
                     tokens = []
                     tags = []
                     for line in f:
@@ -208,15 +209,15 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
                             labels.append(tags)
                             tokens = []
                             tags = []
-                    else:
-                        token, tag = line.split()
-                        tokens.append(token)
-                        tags.append(tag.rstrip())
+                        else:
+                            token, tag = line.split()
+                            tokens.append(token)
+                            tags.append(tag.rstrip())
             texts.append(tokens)
             labels.append(tags)
 
-        for i in enumerate(texts):
-            tokens = text[i]
+        for i, token in enumerate(texts):
+            tokens = texts[i]
             ner_tags = labels[i]
             yield i, {
                 "id": str(i),