gsarti committed on
Commit
e4521e4
1 Parent(s): bde480d

Create magpie.py

Browse files
Files changed (1) hide show
  1. magpie.py +131 -0
magpie.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from typing import List

import datasets
# Module-level logger, namespaced to this dataset loading script.
logger = datasets.logging.get_logger(__name__)


# BibTeX entries for the original MAGPIE corpus paper (Haagsma et al., 2020)
# and for Dankers et al. (2022), whose filtered subset this script exposes.
_CITATION = """\
@inproceedings{haagsma-etal-2020-magpie,
    title = "{MAGPIE}: A Large Corpus of Potentially Idiomatic Expressions",
    author = "Haagsma, Hessel and
      Bos, Johan and
      Nissim, Malvina",
    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.35",
    pages = "279--287",
    language = "English",
    ISBN = "979-10-95546-34-4",
}

@inproceedings{dankers-etal-2022-transformer,
    title = "Can Transformer be Too Compositional? Analysing Idiom Processing in Neural Machine Translation",
    author = "Dankers, Verna and
      Lucas, Christopher and
      Titov, Ivan",
    booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-long.252",
    doi = "10.18653/v1/2022.acl-long.252",
    pages = "3608--3626",
}

"""

# Human-readable summary shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """\
The MAGPIE corpus is a large sense-annotated corpus of potentially idiomatic expressions (PIEs), based on the British National Corpus (BNC). Potentially idiomatic expressions are like idiomatic expressions, but the term also covers literal uses of idiomatic expressions, such as 'I leave work at the end of the day.' for the idiom 'at the end of the day'. This version of the dataset reflects the filtered subset used by Dankers et al. (2022) in their investigation on how PIEs are represented by NMT models. Authors use 37k samples annotated as fully figurative or literal, for 1482 idioms that contain nouns, numerals or adjectives that are colours (which they refer to as keywords). Because idioms show syntactic and morphological variability, the focus is mostly put on nouns. PIEs and their context are separated using the original corpus’s word-level annotations.
"""

# Upstream repository of Dankers et al. (2022), which this subset follows.
_HOMEPAGE = "https://github.com/vernadankers/mt_idioms"

_LICENSE = "CC-BY-4.0"
class MagpieConfig(datasets.BuilderConfig):
    """BuilderConfig for MAGPIE."""

    def __init__(self, features, data_url, **kwargs):
        """BuilderConfig for MAGPIE.

        Args:
            features: `list[str]`, names of the features that will appear in the
                feature dict, in the column order of the TSV file. Should not
                include "label".
            data_url: `str`, URL from which the TSV data file is downloaded.
            **kwargs: keyword arguments forwarded to super.
        """
        # BUG FIX: `features` and `data_url` were referenced in the body but
        # missing from the signature, raising NameError on instantiation.
        super().__init__(**kwargs)
        self.features = features
        self.data_url = data_url
class Magpie(datasets.GeneratorBasedBuilder):
    """MAGPIE: A Large Corpus of Potentially Idiomatic Expressions"""

    BUILDER_CONFIGS = [
        MagpieConfig(
            name="magpie",
            version=datasets.Version("1.0.0"),
            features=["sentence", "annotation", "idiom", "usage", "variant", "pos_tags"],
            data_url="https://huggingface.co/datasets/gsarti/magpie/resolve/main/magpie.tsv",
        ),
        MagpieConfig(
            name="keywords",
            version=datasets.Version("1.0.0"),
            # BUG FIX: a comma was missing after this list, which made the
            # original class body a SyntaxError.
            features=["idiom", "contains_noun", "pos_tags", "nouns"],
            data_url="https://huggingface.co/datasets/gsarti/magpie/resolve/main/keywords.tsv",
        ),
    ]

    DEFAULT_CONFIG_NAME = "magpie"

    def _info(self):
        """Build the DatasetInfo, overriding non-string feature types per config."""
        # Default every configured column to a plain string, then specialize.
        features = {feature: datasets.Value("string") for feature in self.config.features}
        if self.config.name == "magpie":
            # Word-level figurative/literal mask over the sentence tokens.
            features["annotation"] = datasets.features.Sequence(datasets.Value("uint8"))
        else:
            features["contains_noun"] = datasets.Value("bool")
            features["nouns"] = datasets.features.Sequence(datasets.Value("string"))
            features["pos_tags"] = datasets.features.Sequence(datasets.Value("string"))
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the config's TSV file and expose it as a single train split."""
        data_file = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "split": "train",
                    "features": self.config.features,
                },
            ),
        ]

    def _generate_examples(self, filepath: str, split: str, features: List[str]):
        """Yields examples as (key, example) tuples.

        Args:
            filepath: path to the downloaded tab-separated data file.
            split: split name (always "train"; kept for interface symmetry).
            features: column names, in the same order as the TSV columns.
        """
        with open(filepath, encoding="utf8") as f:
            for id_, row in enumerate(f):
                # The first row is the TSV header.
                if id_ == 0:
                    continue
                fields = row.strip().split("\t")
                if len(fields) < 5:
                    # "keywords" rows: idiom, contains_noun, pos_tags, nouns.
                    fields[1] = fields[1] == "yes"
                    fields[2] = fields[2].strip().split()
                    fields[3] = fields[3].strip().split()
                else:
                    # "magpie" rows: the annotation mask and pos_tags columns
                    # are whitespace-separated token sequences.
                    fields[1] = fields[1].strip().split()
                    fields[-1] = fields[-1].strip().split()
                # BUG FIX: the original called v.strip() on every value, but
                # some values are now lists/bools, raising AttributeError on
                # every row; only strings are stripped here.
                yield id_, {
                    k: v.strip() if isinstance(v, str) else v
                    for k, v in zip(features, fields)
                }