jfrenz committed
Commit 75b9028
1 Parent(s): 5123818

remove empty trailing sentences

Files changed (2)
  1. dataset_infos.json +1 -1
  2. legalglue.py +60 -3
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"german_ler": {"description": "description", "citation": "@inproceedings{leitner2019fine,\nauthor = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},\ntitle = {{Fine-grained Named Entity Recognition in Legal Documents}},\nbooktitle = {Semantic Systems. The Power of AI and Knowledge\n Graphs. Proceedings of the 15th International Conference\n (SEMANTiCS 2019)},\nyear = 2019,\neditor = {Maribel Acosta and Philippe Cudr\u00e9-Mauroux and Maria\n Maleshkova and Tassilo Pellegrini and Harald Sack and York\n Sure-Vetter},\nkeywords = {aip},\npublisher = {Springer},\nseries = {Lecture Notes in Computer Science},\nnumber = {11702},\naddress = {Karlsruhe, Germany},\nmonth = 9,\nnote = {10/11 September 2019},\npages = {272--287},\npdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}\n", "homepage": "https://github.com/elenanereiss/Legal-Entity-Recognition", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 39, "names": ["B-AN", "B-EUN", "B-GRT", "B-GS", "B-INN", "B-LD", "B-LDS", "B-LIT", "B-MRK", "B-ORG", "B-PER", "B-RR", "B-RS", "B-ST", "B-STR", "B-UN", "B-VO", "B-VS", "B-VT", "I-AN", "I-EUN", "I-GRT", "I-GS", "I-INN", "I-LD", "I-LDS", "I-LIT", "I-MRK", "I-ORG", "I-PER", "I-RR", "I-RS", "I-ST", "I-STR", "I-UN", "I-VO", "I-VS", "I-VT", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "legal_glue", "config_name": "german_ler", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38854047, "num_examples": 66730, "dataset_name": "legal_glue"}}, "download_checksums": {"https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip": {"num_bytes": 4392913, "checksum": "f0427df5fb8bfdefe5228bc0fa0e75e9cfa782d1a78e32582cce096473c88567"}}, "download_size": 4392913, "post_processing_size": null, "dataset_size": 38854047, "size_in_bytes": 43246960}}
+ {"german_ler": {"description": "description", "citation": "@inproceedings{leitner2019fine,\nauthor = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},\ntitle = {{Fine-grained Named Entity Recognition in Legal Documents}},\nbooktitle = {Semantic Systems. The Power of AI and Knowledge\n Graphs. Proceedings of the 15th International Conference\n (SEMANTiCS 2019)},\nyear = 2019,\neditor = {Maribel Acosta and Philippe Cudr\u00e9-Mauroux and Maria\n Maleshkova and Tassilo Pellegrini and Harald Sack and York\n Sure-Vetter},\nkeywords = {aip},\npublisher = {Springer},\nseries = {Lecture Notes in Computer Science},\nnumber = {11702},\naddress = {Karlsruhe, Germany},\nmonth = 9,\nnote = {10/11 September 2019},\npages = {272--287},\npdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}\n", "homepage": "https://github.com/elenanereiss/Legal-Entity-Recognition", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 39, "names": ["B-AN", "B-EUN", "B-GRT", "B-GS", "B-INN", "B-LD", "B-LDS", "B-LIT", "B-MRK", "B-ORG", "B-PER", "B-RR", "B-RS", "B-ST", "B-STR", "B-UN", "B-VO", "B-VS", "B-VT", "I-AN", "I-EUN", "I-GRT", "I-GS", "I-INN", "I-LD", "I-LDS", "I-LIT", "I-MRK", "I-ORG", "I-PER", "I-RR", "I-RS", "I-ST", "I-STR", "I-UN", "I-VO", "I-VS", "I-VT", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "legal_glue", "config_name": "german_ler", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38853928, "num_examples": 66723, "dataset_name": "legal_glue"}}, "download_checksums": {"https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/dataset_courts.zip": {"num_bytes": 4392913, "checksum": "f0427df5fb8bfdefe5228bc0fa0e75e9cfa782d1a78e32582cce096473c88567"}}, "download_size": 4392913, "post_processing_size": null, "dataset_size": 38853928, "size_in_bytes": 43246841}}
legalglue.py CHANGED
@@ -68,6 +68,21 @@ GERMAN_LER = [
    "I-VT",
    "O"]

+ LENER_BR=[
+     "O",
+     "B-ORGANIZACAO",
+     "I-ORGANIZACAO",
+     "B-PESSOA",
+     "I-PESSOA",
+     "B-TEMPO",
+     "I-TEMPO",
+     "B-LOCAL",
+     "I-LOCAL",
+     "B-LEGISLACAO",
+     "I-LEGISLACAO",
+     "B-JURISPRUDENCIA",
+     "I-JURISPRUDENCIA",
+ ]

class LegalGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for LegalGLUE."""
@@ -128,7 +143,45 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
            pages = {272--287},
            pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
            """)
-         )
+         ),
+         # LegalGlueConfig(
+         #     name="lener_br",
+         #     description=textwrap.dedent(
+         #         """\
+         #         LeNER-Br is a Portuguese language dataset for named entity recognition
+         #         applied to legal documents. LeNER-Br consists entirely of manually annotated
+         #         legislation and legal cases texts and contains tags for persons, locations,
+         #         time entities, organizations, legislation and legal cases.
+         #         To compose the dataset, 66 legal documents from several Brazilian Courts were
+         #         collected. Courts of superior and state levels were considered, such as Supremo
+         #         Tribunal Federal, Superior Tribunal de Justiça, Tribunal de Justiça de Minas
+         #         Gerais and Tribunal de Contas da União. In addition, four legislation documents
+         #         were collected, such as "Lei Maria da Penha", giving a total of 70 documents
+         #         """
+         #     ),
+         #     label_classes=LENER_BR,
+         #     multi_label=False,
+         #     data_url="https://github.com/peluz/lener-br/raw/master/leNER-Br/",
+         #     data_files=["train/train.conll", "dev/dev.conll", "test/test.conll"],
+         #     homepage="https://cic.unb.br/~teodecampos/LeNER-Br/",
+         #     citation=textwrap.dedent("""\
+         #         @inproceedings{luz_etal_propor2018,
+         #             author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
+         #                       Renato R. R. {de Oliveira} and Matheus Stauffer and
+         #                       Samuel Couto and Paulo Bermejo},
+         #             title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
+         #             booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
+         #             publisher = {Springer},
+         #             series = {Lecture Notes on Computer Science ({LNCS})},
+         #             pages = {313--323},
+         #             year = {2018},
+         #             month = {September 24-26},
+         #             address = {Canela, RS, Brazil},
+         #             doi = {10.1007/978-3-319-99722-3_32},
+         #             url = {https://cic.unb.br/~teodecampos/LeNER-Br/},
+         #         }
+         #         """)
+         # )
    ]

    def _info(self):
@@ -163,6 +216,9 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
                    "files": [os.path.join(archive,file) for file in self.config.data_files]#dl_manager.iter_archive(archive),
                },
            )]
+         #elif self.config_name == "lener_br":
+
+
        # else:
        #     return [
        #         datasets.SplitGenerator(
@@ -213,8 +269,9 @@ class LegalGLUE(datasets.GeneratorBasedBuilder):
                    token, tag = line.split()
                    tokens.append(token)
                    tags.append(tag.rstrip())
-             texts.append(tokens)
-             labels.append(tags)
+             if tokens:
+                 texts.append(tokens)
+                 labels.append(tags)

            for i,token in enumerate(texts):
                tokens = texts[i]
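The last hunk is the change the commit message refers to: a sentence is appended only if at least one token was collected, so a trailing blank line in a .conll file no longer yields an empty example (which is where the seven-example drop in dataset_infos.json comes from). A minimal standalone sketch of the same guard, assuming a CoNLL-style file with blank lines as sentence separators (read_conll and its names are illustrative, not the actual loader code):

# Illustrative sketch (not the actual legalglue.py loader): parse a
# CoNLL-style file where blank lines separate sentences, and only flush a
# sentence when tokens were actually collected, so trailing blank lines do
# not produce empty examples.
def read_conll(path):
    texts, labels = [], []
    tokens, tags = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                # Sentence boundary: flush only non-empty sentences.
                if tokens:
                    texts.append(tokens)
                    labels.append(tags)
                tokens, tags = [], []
            else:
                token, tag = line.split()
                tokens.append(token)
                tags.append(tag.rstrip())
    # Same guard for the final sentence / files ending in blank lines.
    if tokens:
        texts.append(tokens)
        labels.append(tags)
    return texts, labels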