dibyaaaaax committed on
Commit
afa9044
1 Parent(s): 18b6d8d

Create kp20k.py

Files changed (1)
  1. kp20k.py +159 -0
kp20k.py ADDED
import json

import datasets

# _SPLIT = ['train', 'test', 'valid']
_CITATION = """\
@InProceedings{meng-EtAl:2017:Long,
  author    = {Meng, Rui and Zhao, Sanqiang and Han, Shuguang and He, Daqing and Brusilovsky, Peter and Chi, Yu},
  title     = {Deep Keyphrase Generation},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = {July},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {582--592},
  url       = {http://aclweb.org/anthology/P17-1054}
}
"""

_DESCRIPTION = """\

"""

_HOMEPAGE = "http://memray.me/uploads/acl17-keyphrase-generation.pdf"

# The license information was obtained from https://github.com/boudinfl/ake-datasets,
# the source from which the dataset shared here was taken.
_LICENSE = ""

# TODO: Add link to the official dataset URLs here

_URLS = {
    "test": "test.jsonl",
    "train": "train.jsonl",
    "valid": "valid.jsonl"
}

# TODO: Name of the dataset usually matches the script name, with CamelCase instead of snake_case
class KP20k(datasets.GeneratorBasedBuilder):
    """KP20k: a large-scale keyphrase dataset built from computer science paper abstracts (Meng et al., 2017)."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION,
                               description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        if self.config.name == "extraction":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        else:
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string"))
                        }
                    )
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['test'],
                    "split": "test"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['valid'],
                    "split": "valid",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            # each line of the .jsonl file holds one JSON-encoded document record
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    # Yields examples as (key, example) tuples
                    yield key, {
                        "id": data.get("paper_id"),
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags")
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data.get("paper_id"),
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases")
                    }
                else:
                    yield key, {
                        "id": data.get("paper_id"),
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        "other_metadata": data["other_metadata"]
                    }
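
For reference, a minimal usage sketch (not part of the commit): it assumes this script and the three .jsonl files named in _URLS sit in the same local directory, and that the installed `datasets` version still supports script-based loading. The local path below is hypothetical; the config names and record fields follow BUILDER_CONFIGS and _info() above.

from datasets import load_dataset

# Pick one of the three configs defined in BUILDER_CONFIGS:
# "extraction" (the default), "generation", or "raw".
dataset = load_dataset("path/to/kp20k.py", "generation")  # hypothetical local path

# Fields follow the "generation" features defined in _info().
sample = dataset["train"][0]
print(sample["id"])                      # paper id
print(sample["document"][:10])           # first ten tokens of the document
print(sample["extractive_keyphrases"])   # keyphrases that appear in the text
print(sample["abstractive_keyphrases"])  # keyphrases that do not appear verbatim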