Datasets:

Languages:
Italian
Multilinguality:
monolingual
Size Categories:
unknown
Language Creators:
expert-generated
Annotations Creators:
expert-generated
Source Datasets:
original
ArXiv:
Tags:
License:
gsarti committed on
Commit
6b37eca
1 Parent(s): 849406c

Initial config

Browse files
Files changed (1) hide show
  1. itacola.py +146 -0
itacola.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import sys
3
+
4
+ import datasets
5
+ from typing import List
6
+
7
+ csv.field_size_limit(sys.maxsize)
8
+
9
+
10
+ _CITATION = """\
11
+ @inproceedings{trotta-etal-2021-itacola,
12
+ author = {Trotta, Daniela and Guarasci, Raffaele and Leonardelli, Elisa and Tonelli, Sara},
13
+ title = {Monolingual and Cross-Lingual Acceptability Judgments with the Italian {CoLA} corpus},
14
+ journal = {Arxiv preprint},
15
+ year = {2021},
16
+ }
17
+ """
18
+
19
+ _DESCRIPTION = """\
20
+ The Italian Corpus of Linguistic Acceptability includes almost 10k sentences taken from
21
+ linguistic literature with a binary annotation made by the original authors themselves.
22
+ The work is inspired by the English Corpus of Linguistic Acceptability (CoLA) by Warstadt et al.
23
+ Part of the dataset has been manually annotated to highlight 9 linguistic phenomena.
24
+ """
25
+
26
+ _HOMEPAGE = "https://github.com/dhfbk/ItaCoLA-dataset"
27
+
28
+ _LICENSE = "None"
29
+
30
+ _SPLITS = ["train", "test"]
31
+
32
+
33
+ class ItaColaConfig(datasets.BuilderConfig):
34
+ """BuilderConfig for ItaCoLA."""
35
+
36
+ def __init__(
37
+ self,
38
+ features,
39
+ data_url,
40
+ **kwargs,
41
+ ):
42
+ """
43
+ Args:
44
+ features: `list[string]`, list of the features that will appear in the
45
+ feature dict. Should not include "label".
46
+ data_url: `string`, url to download the zip file from.
47
+ **kwargs: keyword arguments forwarded to super.
48
+ """
49
+ super().__init__(version=datasets.Version("1.0.0"), **kwargs)
50
+ self.data_url = data_url
51
+ self.features = features
52
+
53
+
54
+ class ItaCola(datasets.GeneratorBasedBuilder):
55
+ VERSION = datasets.Version("1.0.0")
56
+
57
+ BUILDER_CONFIGS = [
58
+ ItaColaConfig(
59
+ name="scores",
60
+ features=["unique_id", "source", "acceptability", "sentence"],
61
+ data_url="https://raw.githubusercontent.com/dhfbk/ItaCoLA-dataset/main/ItaCoLA_dataset.tsv"
62
+ ),
63
+ ItaColaConfig(
64
+ name="phenomena",
65
+ features=[
66
+ "unique_id",
67
+ "source",
68
+ "acceptability",
69
+ "sentence",
70
+ "cleft_construction",
71
+ "copular_construction",
72
+ "subject_verb_agreement",
73
+ "wh_islands_violations",
74
+ "simple",
75
+ "question",
76
+ "auxiliary",
77
+ "bind",
78
+ "indefinite_pronouns",
79
+ ],
80
+ data_url="https://github.com/dhfbk/ItaCoLA-dataset/raw/main/ItaCoLA_dataset_phenomenon.tsv"
81
+ ),
82
+ ]
83
+
84
+ DEFAULT_CONFIG_NAME = "scores"
85
+
86
+ def _info(self):
87
+ features = {feature: datasets.Value("int32") for feature in self.config.features}
88
+ features["source"] = datasets.Value("string")
89
+ features["sentence"] = datasets.Value("string")
90
+ return datasets.DatasetInfo(
91
+ description=_DESCRIPTION,
92
+ features=datasets.Features(features),
93
+ homepage=_HOMEPAGE,
94
+ license=_LICENSE,
95
+ citation=_CITATION,
96
+ )
97
+
98
+ def _split_generators(self, dl_manager):
99
+ """Returns SplitGenerators."""
100
+ data_file = dl_manager.download_and_extract(self.config.data_url)
101
+ if self.config.name == "scores":
102
+ return [
103
+ datasets.SplitGenerator(
104
+ name=datasets.Split.TRAIN,
105
+ gen_kwargs={
106
+ "filepath": data_file,
107
+ "split": "train",
108
+ "features": self.config.features,
109
+ },
110
+ ),
111
+ datasets.SplitGenerator(
112
+ name=datasets.Split.TEST,
113
+ gen_kwargs={
114
+ "filepath": data_file,
115
+ "split": "test",
116
+ "features": self.config.features,
117
+ },
118
+ ),
119
+ ]
120
+ else:
121
+ return [
122
+ datasets.SplitGenerator(
123
+ name=datasets.Split.TRAIN,
124
+ gen_kwargs={
125
+ "filepath": data_file,
126
+ "split": "train",
127
+ "features": self.config.features,
128
+ },
129
+ ),
130
+ ]
131
+
132
+ def _generate_examples(self, filepath: str, split: str, features: List[str]):
133
+ """Yields examples as (key, example) tuples."""
134
+ with open(filepath, encoding="utf8") as f:
135
+ for id_, row in f:
136
+ if id_ == 0:
137
+ continue
138
+ ex_split = None
139
+ fields = row.strip().split("\t")
140
+ if len(fields) > 5:
141
+ fields, ex_split = fields[:-1], fields[-1]
142
+ if ex_split.strip() != split:
143
+ continue
144
+ yield id_, {
145
+ k:v.strip() for k,v in zip(features, fields)
146
+ }