Datasets:

Modalities:
Text
Libraries:
Datasets
dibyaaaaax committed on
Commit
7529919
·
1 Parent(s): c460b44

Upload kptimes.py

Browse files
Files changed (1) hide show
  1. kptimes.py +148 -0
kptimes.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json

import datasets

# _SPLIT = ['train', 'test', 'valid']
_CITATION = """\
"""

_DESCRIPTION = """\

"""

_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here

# Relative paths; presumably resolved against the dataset repository root by
# the download manager — TODO confirm against the hosting layout.
_URLS = {
    "test": "test.jsonl",
    "train": "train.jsonl",
    "valid": "valid.jsonl"
}
24
+
25
+
26
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
# NOTE(review): this builder is named `Inspec` inside a file uploaded as
# `kptimes.py` — it looks copy-pasted from the Inspec dataset script. The name
# is left unchanged here so any caller referencing it directly keeps working,
# but it should probably be renamed to `KPTimes` — TODO confirm with the owner.
class Inspec(datasets.GeneratorBasedBuilder):
    """Keyphrase dataset builder exposing three configurations.

    - "extraction": documents as token sequences plus token-level BIO tags.
    - "generation": documents plus extractive/abstractive keyphrase lists.
    - "raw": all of the above plus a nested `other_metadata` record.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION,
                               description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        """Return the DatasetInfo; the feature schema depends on self.config.name."""
        if self.config.name == "extraction":
            # Token-level schema for sequence-labelling style keyphrase extraction.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.name == "generation":
            # Keyphrase lists for generation-style models.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        else:
            # "raw" config: full record, including the nested metadata sequences.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSONL files and map them to the standard splits."""
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"filepath": data_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir["valid"], "split": "valid"},
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one JSONL file.

        `split` is unused but kept because _split_generators passes it.
        Optional fields are read with .get() so a missing key yields None
        instead of raising; `paper_id`, `document`, and (for the "raw"
        config) `other_metadata` are required and raise KeyError if absent.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                    }
                else:
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        "other_metadata": data["other_metadata"],
                    }