rajivratn committed on
Commit
51f604c
1 Parent(s): f8ea528

Upload inspec.py

Browse files
Files changed (1) hide show
  1. inspec.py +170 -0
inspec.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import os
4
+
5
+ import datasets
6
+ from typing import List, Any
7
+
8
+ # _SPLIT = ['train', 'test', 'valid']
9
+ _CITATION = """\
10
+ author: amardeep
11
+ """
12
+
13
+
14
+ _DESCRIPTION = """\
15
+ This new dataset is designed to solve kp NLP task and is crafted with a lot of care.
16
+ """
17
+
18
+
19
+ _HOMEPAGE = ""
20
+
21
+ # TODO: Add the licence for the dataset here if you can find it
22
+ _LICENSE = ""
23
+
24
+ # TODO: Add link to the official dataset URLs here
25
+
26
+ _URLS = {
27
+ "test": "test.jsonl",
28
+ # "train": "train.jsonl",
29
+ "valid": "valid.jsonl"
30
+ }
31
class TestLDKP(datasets.GeneratorBasedBuilder):
    """Keyphrase extraction/generation dataset loaded from JSON Lines files.

    Five configurations are exposed:
      - "extraction" / "ldkp_extraction": token sequences with BIO tags;
        the ldkp_* variant appends the extra text/tags from other_metadata.
      - "generation" / "ldkp_generation": documents paired with extractive
        and abstractive keyphrase lists.
      - "raw": the full record, including tags and other_metadata.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION, description="This part of my dataset covers long document"),
        datasets.BuilderConfig(name="generation", version=VERSION, description="This part of my dataset covers abstract only"),
        datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers abstract only"),
        datasets.BuilderConfig(name="ldkp_generation", version=VERSION, description="This part of my dataset covers abstract only"),
        datasets.BuilderConfig(name="ldkp_extraction", version=VERSION, description="This part of my dataset covers abstract only"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        """Return the DatasetInfo whose feature schema matches self.config.name."""
        if self.config.name in ("extraction", "ldkp_extraction"):
            # Token-level BIO tagging schema.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "BIO_tags": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.name in ("generation", "ldkp_generation"):
            # Document plus gold keyphrase lists.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        else:
            # "raw": everything in the source record, including other_metadata.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "document_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "tags": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the split files and declare one generator per available split.

        Only splits present in _URLS are generated, so temporarily removing
        a split (e.g. commenting out "train") no longer raises KeyError.
        """
        data_dir = dl_manager.download_and_extract(_URLS)
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "valid": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[key],
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"filepath": path, "split": key},
            )
            for key, path in data_dir.items()
            if key in split_names
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one JSON Lines file, shaped per config."""
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "BIO_tags": data["document_tags"],
                    }
                elif self.config.name == "ldkp_extraction":
                    # Long-document variant: extend tokens/tags with other_metadata.
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"] + data["other_metadata"]["text"],
                        "BIO_tags": data["document_tags"] + data["other_metadata"]["tags"],
                    }
                elif self.config.name == "ldkp_generation":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"] + data["other_metadata"]["text"],
                        "extractive_keyphrases": data["extractive_keyphrases"],
                        "abstractive_keyphrases": data["abstractive_keyphrases"],
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "extractive_keyphrases": data["extractive_keyphrases"],
                        "abstractive_keyphrases": data["abstractive_keyphrases"],
                    }
                else:
                    # "raw": pass the record through unchanged.
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "document_tags": data["document_tags"],
                        "extractive_keyphrases": data["extractive_keyphrases"],
                        "abstractive_keyphrases": data["abstractive_keyphrases"],
                        "other_metadata": data["other_metadata"],
                    }