dibyaaaaax commited on
Commit
9674d67
1 Parent(s): 15f4b64

Upload cstr.py

Browse files
Files changed (1) hide show
  1. cstr.py +158 -0
cstr.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
import datasets

# _SPLIT = ['train', 'test', 'valid']

# BibTeX entry for the KEA paper (Witten et al., 1999) used as the citation
# for this keyphrase dataset.
_CITATION = """\
@inproceedings{10.1145/313238.313437,
author = {Witten, Ian H. and Paynter, Gordon W. and Frank, Eibe and Gutwin, Carl and Nevill-Manning, Craig G.},
title = {KEA: Practical Automatic Keyphrase Extraction},
year = {1999},
isbn = {1581131453},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/313238.313437},
doi = {10.1145/313238.313437},
booktitle = {Proceedings of the Fourth ACM Conference on Digital Libraries},
pages = {254–255},
numpages = {2},
location = {Berkeley, California, USA},
series = {DL '99}
}
"""

# Human-readable description shown on the dataset page (currently empty).
_DESCRIPTION = """\

"""

# Homepage URL for the dataset (none provided).
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here

# Relative paths to the per-split JSONL data files; resolved by the
# download manager in CSTR._split_generators.
_URLS = {
    "test": "test.jsonl",
    "train": "train.jsonl"
}
class CSTR(datasets.GeneratorBasedBuilder):
    """CSTR corpus loader for keyphrase extraction and generation.

    Each line of the train/test JSONL files is one record containing a
    tokenized document plus keyphrase annotations. Three configurations
    expose different views of the same files:

    * ``extraction``: tokens and per-token BIO keyphrase tags.
    * ``generation``: tokens and gold extractive/abstractive keyphrase lists.
    * ``raw``: every field, including the per-record ``other_metadata``.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        """Return the DatasetInfo (feature schema, citation, license) for the selected config."""
        if self.config.name == "extraction":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        else:
            # "raw": full schema, including the nested other_metadata record.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string"))
                        }
                    )
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and declare one generator per split."""
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": data_dir['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": data_dir['test'],
                    "split": "test"
                },
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one JSONL file.

        ``split`` is unused but kept because _split_generators passes it.
        Optional annotation fields are read with dict.get so a record
        missing one yields None for that field instead of raising KeyError.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    # Yields examples as (key, example) tuples.
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags")
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases")
                    }
                else:
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        # .get for consistency with the other optional fields above;
                        # was a hard index that raised KeyError when the key was absent.
                        "other_metadata": data.get("other_metadata")
                    }