dibyaaaaax committed
Commit 0d318b9 (1 parent: 7589680)

Upload pubmed.py

Files changed (1)
  1. pubmed.py +136 -0
pubmed.py ADDED
@@ -0,0 +1,136 @@
import json

import datasets

# _SPLIT = ['test']
_CITATION = """\
@inproceedings{Schutz2008KeyphraseEF,
title={Keyphrase Extraction from Single Documents in the Open Domain Exploiting Linguistic and Statistical Methods},
author={Alexander Schutz},
year={2008}
}

"""

_DESCRIPTION = """\

"""

_HOMEPAGE = ""

# TODO: Add the license for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
_URLS = {
    "test": "test.jsonl"
}


# TODO: The name of the dataset usually matches the script name, in CamelCase instead of snake_case
class Pubmed(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION,
                               description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        if self.config.name == "extraction":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        else:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string"))
                        }
                    )
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test"
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    # Yields examples as (key, example) tuples
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags")
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases")
                    }
                else:
                    yield key, {
                        "id": data["paper_id"],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        "other_metadata": data["other_metadata"]
                    }
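
For reference, a minimal usage sketch of the uploaded loading script. It assumes pubmed.py and test.jsonl sit in the working directory (these paths are illustrative, not part of this commit) and simply selects each of the three configurations declared in BUILDER_CONFIGS; recent versions of the datasets library may additionally require trust_remote_code=True when loading a script-based dataset.

    from datasets import load_dataset

    # Minimal sketch: load the script-based dataset locally and pick a configuration.
    # The relative paths below are illustrative assumptions, not part of the commit.
    extraction = load_dataset("pubmed.py", "extraction", split="test")
    generation = load_dataset("pubmed.py", "generation", split="test")
    raw = load_dataset("pubmed.py", "raw", split="test")

    # Each configuration exposes the columns declared in _info():
    #   extraction -> id, document, doc_bio_tags
    #   generation -> id, document, extractive_keyphrases, abstractive_keyphrases
    #   raw        -> all of the above plus other_metadata
    print(extraction[0]["doc_bio_tags"][:5])
    print(generation[0]["extractive_keyphrases"])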