ad6398 committed on
Commit
268b081
1 Parent(s): 64850f5

Update ldkp10k.py

Files changed (1)
  1. ldkp10k.py +121 -0
ldkp10k.py CHANGED
@@ -0,0 +1,121 @@
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ TBA
+ """
+
+
+ _DESCRIPTION = """\
+ A dataset for keyphrase extraction and generation from long documents, with section-level text and BIO tags.
+ """
+
+
+ _HOMEPAGE = ""
+
+ # TODO: Add the license for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ _URLS = {
+     "test": ["data/test.jsonl"],
+     # "dummy_path" is a placeholder; _info() replaces it with the config-specific train archive
+     "train": ["dummy_path"],
+     "valid": ["data/valid.jsonl"],
+ }
+
+
+ # The class name matches the script name (ldkp10k.py) in CamelCase.
+ class LDKP10k(datasets.GeneratorBasedBuilder):
+     """Keyphrase extraction and generation from long documents, organized section-wise."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # Each config selects a differently sized training archive; test and valid are shared.
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="small", version=VERSION, description="Small training subset of the long-document data"),
+         datasets.BuilderConfig(name="medium", version=VERSION, description="Medium training subset of the long-document data"),
+         datasets.BuilderConfig(name="large", version=VERSION, description="Large training subset of the long-document data"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "small"
+
+     def _info(self):
+         # The train URL depends on the chosen config (small/medium/large).
+         _URLS["train"] = "data/" + self.config.name + ".zip"
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "sections": datasets.features.Sequence(datasets.Value("string")),
+                 "sec_text": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
+                 "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
+                 "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
+                 "sec_bio_tags": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
+             }
+         )
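+         # For reference, one input JSON line is expected to look roughly like the
+         # record below (values invented purely for illustration):
+         # {"paper_id": "123", "sections": ["title", "abstract"],
+         #  "sec_text": [["Some", "title", "tokens"], ["Abstract", "tokens"]],
+         #  "extractive_keyphrases": ["keyphrase one"],
+         #  "abstractive_keyphrases": ["keyphrase two"],
+         #  "sec_bio_tags": [["O", "O", "O"], ["B", "I"]]}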
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             homepage=_HOMEPAGE,
+             # License for the dataset if available.
+             license=_LICENSE,
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # download_and_extract returns an extraction directory for the train zip
+         # and lists of local files for the test/valid jsonl downloads.
+         data_dir = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples.
+                 gen_kwargs={
+                     "filepaths": [os.path.join(data_dir["train"], filename) for filename in os.listdir(data_dir["train"])],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepaths": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepaths": data_dir["valid"],
+                     "split": "valid",
+                 },
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepaths, split):
+         # A single running key keeps example ids unique across multiple train files.
+         key = 0
+         for filepath in filepaths:
+             with open(filepath, encoding="utf-8") as f:
+                 for row in f:
+                     data = json.loads(row)
+                     yield key, {
+                         "id": data["paper_id"],
+                         "sections": data["sections"],
+                         "sec_text": data["sec_text"],
+                         "extractive_keyphrases": data["extractive_keyphrases"],
+                         "abstractive_keyphrases": data["abstractive_keyphrases"],
+                         "sec_bio_tags": data["sec_bio_tags"],
+                     }
+                     key += 1
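
Once the script and data files are pushed to the Hub, the dataset loads like any other. A minimal usage sketch, assuming the repo id is midas/ldkp10k (the id and the printed field are illustrative, not confirmed by this commit):

    from datasets import load_dataset

    # "small" is the default config; "medium" and "large" select larger train archives
    ds = load_dataset("midas/ldkp10k", "small")
    print(ds["train"][0]["extractive_keyphrases"])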