eduagarcia committed
Commit 7fcc229
1 Parent(s): 1de8de7

Add Assin2 RTE task

Files changed (2)
  1. .gitattributes +54 -54
  2. portuguese_benchmark.py +105 -17
.gitattributes CHANGED
@@ -1,54 +1,54 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
portuguese_benchmark.py CHANGED
@@ -2,9 +2,14 @@ import textwrap
 import datasets
 from typing import Dict, List, Optional, Union
 
-logger = datasets.logging.get_logger(__name__)
+import xml.etree.ElementTree as ET
 
+logger = datasets.logging.get_logger(__name__)
 
+# Extracted from:
+# - https://huggingface.co/datasets/lener_br
+# - https://github.com/peluz/lener-br
+# - https://teodecampos.github.io/LeNER-Br/
 _LENERBR_KWARGS = dict(
     name = "LeNER-Br",
     description=textwrap.dedent(
@@ -24,26 +29,65 @@ _LENERBR_KWARGS = dict(
         "test": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/test/test.conll",
     },
     citation=textwrap.dedent(
-        """\
-        @InProceedings{luz_etal_propor2018,
-            author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
-                      Renato R. R. {de Oliveira} and Matheus Stauffer and
-                      Samuel Couto and Paulo Bermejo},
-            title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
-            booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
-            publisher = {Springer},
-            series = {Lecture Notes on Computer Science ({LNCS})},
-            pages = {313--323},
-            year = {2018},
-            month = {September 24-26},
-            address = {Canela, RS, Brazil},
-            doi = {10.1007/978-3-319-99722-3_32},
-            url = {https://teodecampos.github.io/LeNER-Br/},
-        }"""
+        """\
+        @InProceedings{luz_etal_propor2018,
+            author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
+                      Renato R. R. {de Oliveira} and Matheus Stauffer and
+                      Samuel Couto and Paulo Bermejo},
+            title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
+            booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
+            publisher = {Springer},
+            series = {Lecture Notes on Computer Science ({LNCS})},
+            pages = {313--323},
+            year = {2018},
+            month = {September 24-26},
+            address = {Canela, RS, Brazil},
+            doi = {10.1007/978-3-319-99722-3_32},
+            url = {https://teodecampos.github.io/LeNER-Br/},
+        }"""
     ),
     url="https://teodecampos.github.io/LeNER-Br/",
 )
 
+# Extracted from:
+# - https://huggingface.co/datasets/assin2
+# - https://sites.google.com/view/assin2
+# - https://github.com/ruanchaves/assin
+_ASSIN2_BASE_KWARGS = dict(
+    description=textwrap.dedent(
+        """\
+        The ASSIN 2 corpus is composed of rather simple sentences. Following the procedures of SemEval 2014 Task 1.
+        The training and validation data are composed, respectively, of 6,500 and 500 sentence pairs in Brazilian Portuguese,
+        annotated for entailment and semantic similarity. Semantic similarity values range from 1 to 5, and text entailment
+        classes are either entailment or none. The test data are composed of approximately 3,000 sentence pairs with the same
+        annotation. All data were manually annotated."""
+    ),
+    data_urls={
+        "train": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-train-only.xml",
+        "dev": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-dev.xml",
+        "test": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-test.xml",
+    },
+    citation=textwrap.dedent(
+        """\
+        @inproceedings{real2020assin,
+            title={The assin 2 shared task: a quick overview},
+            author={Real, Livy and Fonseca, Erick and Oliveira, Hugo Goncalo},
+            booktitle={International Conference on Computational Processing of the Portuguese Language},
+            pages={406--412},
+            year={2020},
+            organization={Springer}
+        }"""
+    ),
+    url="https://sites.google.com/view/assin2",
+)
+_ASSIN2_RTE_KWARGS = dict(
+    name = "assin2-rte",
+    task_type="rte",
+    label_classes=["NONE", "ENTAILMENT"],
+    **_ASSIN2_BASE_KWARGS
+)
+
+
 class PTBenchmarkConfig(datasets.BuilderConfig):
     """BuilderConfig for PTBenchmark."""
 
@@ -103,6 +147,21 @@ def _get_ner_dataset_info(config):
         )
     )
 
+def _get_rte_dataset_info(config):
+    return datasets.DatasetInfo(
+        description=config.description,
+        homepage=config.url,
+        citation=config.citation,
+        features=datasets.Features(
+            {
+                "id": datasets.Value("int32"),
+                "sentence1": datasets.Value("string"),
+                "sentence2": datasets.Value("string"),
+                "label": datasets.features.ClassLabel(names=config.label_classes),
+            }
+        )
+    )
+
 def _conll_ner_generator(file_path):
     with open(file_path, encoding="utf-8") as f:
 
@@ -133,17 +192,43 @@ def _conll_ner_generator(file_path):
                 "ner_tags": ner_tags,
             }
 
+def _assin2_rte_generator(file_path):
+    """Yields examples."""
+    id_ = 0
+
+    with open(file_path, "rb") as f:
+
+        tree = ET.parse(f)
+        root = tree.getroot()
+
+        for pair in root:
+
+            yield id_, {
+                "id": int(pair.attrib.get("id")),
+                "sentence1": pair.find(".//t").text,
+                "sentence2": pair.find(".//h").text,
+                #"relatedness_score": float(pair.attrib.get("similarity")),
+                "label": pair.attrib.get("entailment").upper(),
+            }
+
+            id_ += 1
+
 
 class PTBenchmark(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         PTBenchmarkConfig(
             **_LENERBR_KWARGS
+        ),
+        PTBenchmarkConfig(
+            **_ASSIN2_RTE_KWARGS
         )
     ]
 
     def _info(self) -> datasets.DatasetInfo:
         if self.config.task_type == "ner":
             return _get_ner_dataset_info(self.config)
+        elif self.config.task_type == "rte":
+            return _get_rte_dataset_info(self.config)
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         file_paths = dl_manager.download_and_extract(self.config.data_urls)
@@ -169,3 +254,6 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
         logger.info("⏳ Generating examples from = %s", file_path)
         if self.config.task_type == "ner":
             yield from _conll_ner_generator(file_path)
+        elif self.config.task_type == "rte":
+            if "assin2" in self.config.name:
+                yield from _assin2_rte_generator(file_path)
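
For reference, the new _assin2_rte_generator relies on the ASSIN 2 XML layout: a root element whose <pair> children carry id, entailment, and similarity attributes and contain <t> (text) and <h> (hypothesis) elements. Below is a minimal standalone sketch of that parsing step; the two sample pairs are invented, only the element and attribute names come from the generator above.

import xml.etree.ElementTree as ET

# Invented two-pair sample in the shape the generator expects.
sample = """\
<entailment-corpus>
  <pair id="1" entailment="Entailment" similarity="4.5">
    <t>Uma menina está tocando violão.</t>
    <h>Uma pessoa está tocando um instrumento.</h>
  </pair>
  <pair id="2" entailment="None" similarity="1.2">
    <t>Um homem está cortando um tomate.</t>
    <h>Um gato dorme no sofá.</h>
  </pair>
</entailment-corpus>"""

root = ET.fromstring(sample)
for pair in root:
    # .upper() maps the corpus labels ("Entailment"/"None") onto the
    # config's label_classes ["NONE", "ENTAILMENT"].
    print(pair.attrib["id"], pair.attrib["entailment"].upper())
    print("  t:", pair.find(".//t").text)
    print("  h:", pair.find(".//h").text)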
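
And a quick way to exercise the new config — a sketch only, assuming the script is loaded by its local path and that the "train" key in data_urls maps to the train split (neither a Hub repo id nor the full _split_generators mapping appears in this diff):

import datasets

# "portuguese_benchmark.py" is the local script path; recent datasets
# releases may additionally require trust_remote_code=True for
# script-based datasets.
assin2 = datasets.load_dataset("portuguese_benchmark.py", "assin2-rte")

print(assin2["train"][0])
# e.g. {"id": 1, "sentence1": "...", "sentence2": "...", "label": 1}
# "label" is a ClassLabel index into ["NONE", "ENTAILMENT"].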