holylovenia committed
Commit dd41fde (1 parent: 75643e2)

Upload men.py with huggingface_hub

Files changed (1):
  1. men.py (+187, -0)
men.py ADDED
import json
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
from seacrowd.utils.schemas import kb_features
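
# The `seacrowd.utils` helpers above ship with the SEACrowd data hub
# (https://github.com/SEACrowd/seacrowd-datahub); this script follows its
# dataloader conventions (SEACrowdConfig, the kb schema, the Tasks/Licenses enums).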
_CITATION = """\
@misc{chanthran2024malaysian,
    title={Malaysian English News Decoded: A Linguistic Resource for Named Entity and Relation Extraction},
    author={Mohan Raj Chanthran and Lay-Ki Soon and Huey Fang Ong and Bhawani Selvaretnam},
    year={2024},
    eprint={2402.14521},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DATASETNAME = "men"

_DESCRIPTION = """\
The Malaysian English News (MEN) dataset includes 200 Malaysian English news articles with human-annotated entities and relations
(6,061 entities and 3,268 relation instances in total). Malaysian English combines elements of standard English with Malay, Chinese,
and Indian languages. Four human annotators were split into two groups; each group annotated 100 news articles, and inter-annotator
agreement was calculated between the annotators working on the same task (entity annotation: F1-score 0.82; relation annotation: F1-score 0.51).
"""

_HOMEPAGE = "https://github.com/mohanraj-nlp/MEN-Dataset/tree/main"

_LANGUAGES = ["eng"]

_LICENSE = Licenses.MIT.value

_LOCAL = False

_URLS = "https://github.com/mohanraj-nlp/MEN-Dataset/archive/refs/heads/main.zip"

_SUPPORTED_TASKS = [Tasks.RELATION_EXTRACTION, Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

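# Usage sketch: assuming this script is consumed with the Hugging Face `datasets`
# library (newer versions may additionally require trust_remote_code=True), the
# two configs defined in BUILDER_CONFIGS below can be loaded along these lines:
#
#     from datasets import load_dataset
#
#     source = load_dataset("men.py", name="men_source", split="train")
#     kb = load_dataset("men.py", name="men_seacrowd_kb", split="train")
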
class MENDataset(datasets.GeneratorBasedBuilder):
    """The Malaysian English News dataset comprises 200 articles with 6,061 annotated entities and 3,268 relations.
    Inter-annotator agreement for entity annotation was high (F1-score 0.82), but lower for relation annotation (F1-score 0.51)."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_kb",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_kb",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "article": datasets.Value("string"),
                    "entities": datasets.Sequence(
                        {
                            "id": datasets.Value("int64"),
                            "label": datasets.Value("string"),
                            "position": {"start": datasets.Value("int32"), "end": datasets.Value("int32")},
                        }
                    ),
                    "relations": datasets.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "head": datasets.Value("int32"),
                            "tail": datasets.Value("int32"),
                            "relation": datasets.Value("string"),
                            "relation_source": datasets.Value("string"),
                        }
                    ),
                }
            )

        elif self.config.schema == "seacrowd_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

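    # Illustrative shape of one "source" record under the features above
    # (field values are made up for illustration):
    #
    #     {
    #         "article": "KUALA LUMPUR: ...",
    #         "entities": [{"id": 1, "label": "PERSON", "position": {"start": 15, "end": 27}}],
    #         "relations": [{"id": "1", "head": 1, "tail": 2, "relation": "...", "relation_source": "..."}],
    #     }
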
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators; the dataset ships as a single TRAIN split."""
        data_dir = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                },
            ),
        ]

    def _MEN_repo_splitter(self, filepath: Path) -> Dict:
        """Maps the extracted GitHub archive to the annotation file, the relation
        inventory, and one text file per article."""
        articles = {}
        entities = os.path.join(filepath, "MEN-Dataset-main/data/annotated_set.json")
        relations = os.path.join(filepath, "MEN-Dataset-main/data/rel2id.json")

        with open(entities, "r") as annot_json:
            annots = json.load(annot_json)

        # annotated_set.json holds one record per article; its "id" field names
        # the matching file under data/article_text/.
        article_ids = [i["id"] for i in annots]
        for article_id in article_ids:
            articles[article_id] = os.path.join(filepath, f"MEN-Dataset-main/data/article_text/{article_id}.txt")

        data_dir = {"entities": entities, "articles": articles, "relations": relations}

        return data_dir

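    # Expected layout of the extracted archive, inferred from the paths above:
    #
    #     MEN-Dataset-main/
    #         data/
    #             annotated_set.json        # per-article entity and relation annotations
    #             rel2id.json               # relation label inventory (presumably label-to-id)
    #             article_text/<id>.txt     # raw text, one file per article id
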
    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        filepath = self._MEN_repo_splitter(filepath)

        with open(filepath["entities"], "r") as entities_json:
            annotations = json.load(entities_json)

        # Read every article body up front, keyed by article id.
        articles = {}
        for article_id in [i["id"] for i in annotations]:
            with open(filepath["articles"][article_id], "r") as article_txt:
                articles[article_id] = article_txt.read()

        i = 0
        for item in annotations:
            article_id = item["id"]
            entities = item["entities"]
            relations = item["relations"]

            i += 1
            if self.config.schema == "source":
                yield i, {
                    "article": articles[article_id],
                    "entities": [
                        {
                            "id": entity["id"],
                            "label": entity["label"],
                            "position": {
                                "start": entity["position"]["start_offset"],
                                "end": entity["position"]["end_offset"],
                            },
                        }
                        for entity in entities
                    ],
                    "relations": [
                        {
                            "id": relation["id"],
                            "head": relation["head"],
                            "tail": relation["tail"],
                            "relation": relation["relation"],
                            "relation_source": relation["relation_source"],
                        }
                        for relation in relations
                    ],
                }

            elif self.config.schema == "seacrowd_kb":
                yield i, {
                    "id": str(i),
                    "passages": [
                        {
                            "id": article_id,
                            "type": "text",
                            "text": [articles[article_id]],
                            "offsets": [[0, len(articles[article_id])]],
                        }
                    ],
                    "entities": [
                        {
                            "id": f"{article_id}-entity-{entity['id']}",
                            "type": entity["label"],
                            # The entity surface form is recovered by slicing the article text.
                            "text": [articles[article_id][entity["position"]["start_offset"] : entity["position"]["end_offset"]]],
                            "offsets": [[entity["position"]["start_offset"], entity["position"]["end_offset"]]],
                            "normalized": [],
                        }
                        for entity in entities
                    ],
                    "events": [],
                    "coreferences": [],
                    "relations": [
                        {
                            "id": f"{article_id}-relation-{relation['id']}",
                            "type": relation["relation"],
                            "arg1_id": f"{article_id}-entity-{relation['head']}",
                            "arg2_id": f"{article_id}-entity-{relation['tail']}",
                            "normalized": [{"db_name": relation["relation_source"], "db_id": ""}],
                        }
                        for relation in relations
                    ],
                }
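
# Offset sanity check (a sketch; it assumes a loaded `men_seacrowd_kb` example
# named `ex`, as produced above, where entity offsets index directly into the
# passage text):
#
#     ent = ex["entities"][0]
#     start, end = ent["offsets"][0]
#     assert ex["passages"][0]["text"][0][start:end] == ent["text"][0]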