Datasets:

Modalities:
Text
Size:
< 1K
Libraries:
Datasets
dibyaaaaax committed on
Commit
7b5bab5
1 Parent(s): c3434a3

Upload nus.py

Browse files
Files changed (1) hide show
  1. nus.py +162 -0
nus.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ # _SPLIT = ['test']
5
+ _CITATION = """\
6
+ @InProceedings{10.1007/978-3-540-77094-7_41,
7
+ author="Nguyen, Thuy Dung
8
+ and Kan, Min-Yen",
9
+ editor="Goh, Dion Hoe-Lian
10
+ and Cao, Tru Hoang
11
+ and Solvberg, Ingeborg Torvik
12
+ and Rasmussen, Edie",
13
+ title="Keyphrase Extraction in Scientific Publications",
14
+ booktitle="Asian Digital Libraries. Looking Back 10 Years and Forging New Frontiers",
15
+ year="2007",
16
+ publisher="Springer Berlin Heidelberg",
17
+ address="Berlin, Heidelberg",
18
+ pages="317--326",
19
+ isbn="978-3-540-77094-7"
20
+ }
21
+
22
+ """
23
+
24
+ _DESCRIPTION = """\
25
+
26
+ """
27
+
28
+ _HOMEPAGE = ""
29
+
30
+ # TODO: Add the licence for the dataset here if you can find it
31
+ _LICENSE = ""
32
+
33
+ # TODO: Add link to the official dataset URLs here
34
+
35
+ _URLS = {
36
+ "test": "test.jsonl"
37
+ }
38
+
39
+
40
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
41
class NUS(datasets.GeneratorBasedBuilder):
    """Loader for the NUS keyphrase corpus (Nguyen & Kan, 2007).

    Three configurations are exposed:

    * ``extraction``: document tokens plus per-token BIO tags, for
      keyphrase extraction.
    * ``generation``: document tokens plus extractive / abstractive
      keyphrase lists, for keyphrase generation.
    * ``raw``: all of the above plus per-document ``other_metadata``.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        """Return the DatasetInfo (feature schema) for the selected config."""
        if self.config.name == "extraction":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        else:  # "raw"
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data file(s) and define the available splits.

        Bug fix: the original returned TRAIN/TEST/VALIDATION generators
        indexing ``data_dir['train']`` and ``data_dir['valid']``, but
        ``_URLS`` defines only a ``"test"`` entry, so building the dataset
        raised KeyError.  This corpus ships a single test split, so only
        that split is generated.
        """
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs are passed straight to _generate_examples.
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in
    # `_split_generators`.
    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one JSON-lines file.

        Each line of *filepath* is a JSON object; the fields emitted depend
        on the selected configuration.  ``dict.get`` is used for optional
        fields so a missing key yields None instead of raising.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                    }
                else:
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        "other_metadata": data["other_metadata"],
                    }