Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
dibyaaaaax committed on
Commit
64c5978
1 Parent(s): aa9921b

Upload kpcrowd.py

Browse files
Files changed (1) hide show
  1. kpcrowd.py +151 -0
kpcrowd.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ # _SPLIT = ['train', 'test', 'valid']
5
+ _CITATION = """\
6
+ @misc{marujo2013supervised,
7
+ title={Supervised Topical Key Phrase Extraction of News Stories using Crowdsourcing, Light Filtering and Co-reference Normalization},
8
+ author={Luis Marujo and Anatole Gershman and Jaime Carbonell and Robert Frederking and João P. Neto},
9
+ year={2013},
10
+ eprint={1306.4886},
11
+ archivePrefix={arXiv},
12
+ primaryClass={cs.CL}
13
+ }
14
+ """
15
+
16
+ _DESCRIPTION = """\
17
+
18
+ """
19
+
20
+ _HOMEPAGE = ""
21
+
22
+ # TODO: Add the licence for the dataset here if you can find it
23
+ _LICENSE = ""
24
+
25
+ # TODO: Add link to the official dataset URLs here
26
+
27
+ _URLS = {
28
+ "test": "test.jsonl",
29
+ "train": "train.jsonl"
30
+ }
31
+
32
+
33
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
34
+ class KPCrowd(datasets.GeneratorBasedBuilder):
35
+ """TODO: Short description of my dataset."""
36
+
37
+ VERSION = datasets.Version("0.0.1")
38
+
39
+ BUILDER_CONFIGS = [
40
+ datasets.BuilderConfig(name="extraction", version=VERSION,
41
+ description="This part of my dataset covers extraction"),
42
+ datasets.BuilderConfig(name="generation", version=VERSION,
43
+ description="This part of my dataset covers generation"),
44
+ datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
45
+ ]
46
+
47
+ DEFAULT_CONFIG_NAME = "extraction"
48
+
49
+ def _info(self):
50
+ if self.config.name == "extraction": # This is the name of the configuration selected in BUILDER_CONFIGS above
51
+ features = datasets.Features(
52
+ {
53
+ "id": datasets.Value("string"),
54
+ "document": datasets.features.Sequence(datasets.Value("string")),
55
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
56
+
57
+ }
58
+ )
59
+ elif self.config.name == "generation":
60
+ features = datasets.Features(
61
+ {
62
+ "id": datasets.Value("string"),
63
+ "document": datasets.features.Sequence(datasets.Value("string")),
64
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
65
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
66
+
67
+ }
68
+ )
69
+ else:
70
+ features = datasets.Features(
71
+ {
72
+ "id": datasets.Value("string"),
73
+ "document": datasets.features.Sequence(datasets.Value("string")),
74
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
75
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
76
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
77
+ "other_metadata": datasets.features.Sequence(
78
+ {
79
+ "text": datasets.features.Sequence(datasets.Value("string")),
80
+ "bio_tags": datasets.features.Sequence(datasets.Value("string"))
81
+ }
82
+ )
83
+
84
+ }
85
+ )
86
+ return datasets.DatasetInfo(
87
+ # This is the description that will appear on the datasets page.
88
+ description=_DESCRIPTION,
89
+ # This defines the different columns of the dataset and their types
90
+ features=features,
91
+ homepage=_HOMEPAGE,
92
+ # License for the dataset if available
93
+ license=_LICENSE,
94
+ # Citation for the dataset
95
+ citation=_CITATION,
96
+ )
97
+
98
+ def _split_generators(self, dl_manager):
99
+
100
+ data_dir = dl_manager.download_and_extract(_URLS)
101
+ return [
102
+ datasets.SplitGenerator(
103
+ name=datasets.Split.TRAIN,
104
+ # These kwargs will be passed to _generate_examples
105
+ gen_kwargs={
106
+ "filepath": data_dir['train'],
107
+ "split": "train",
108
+ },
109
+ ),
110
+ datasets.SplitGenerator(
111
+ name=datasets.Split.TEST,
112
+ # These kwargs will be passed to _generate_examples
113
+ gen_kwargs={
114
+ "filepath": data_dir['test'],
115
+ "split": "test"
116
+ },
117
+ ),
118
+ ]
119
+
120
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
121
+ def _generate_examples(self, filepath, split):
122
+ with open(filepath, encoding="utf-8") as f:
123
+ for key, row in enumerate(f):
124
+ data = json.loads(row)
125
+ if self.config.name == "extraction":
126
+ # Yields examples as (key, example) tuples
127
+ yield key, {
128
+ "id": data['paper_id'],
129
+ "document": data["document"],
130
+ "doc_bio_tags": data.get("doc_bio_tags")
131
+ }
132
+ elif self.config.name == "generation":
133
+ yield key, {
134
+ "id": data['paper_id'],
135
+ "document": data["document"],
136
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
137
+ "abstractive_keyphrases": data.get("abstractive_keyphrases")
138
+ }
139
+ else:
140
+ yield key, {
141
+ "id": data['paper_id'],
142
+ "document": data["document"],
143
+ "doc_bio_tags": data.get("doc_bio_tags"),
144
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
145
+ "abstractive_keyphrases": data.get("abstractive_keyphrases"),
146
+ "other_metadata": data["other_metadata"]
147
+ }
148
+
149
+
150
+
151
+