Amardeep Kumar committed
Commit
8063cb1
1 Parent(s): 84f0827

add data and ldkp3k.py

Files changed (2)
  1. data/long_data_sample.json +0 -0
  2. ldkp3k.py +119 -0
data/long_data_sample.json ADDED
The diff for this file is too large to render. See raw diff
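Since the sample file is not rendered here, the following is a hypothetical single record in the shape the loading script below expects. The field names are taken from the script's Features and `_generate_examples`; the values are invented placeholders for illustration only, not drawn from the actual sample file:

    # What json.loads(row) would return for one (invented) JSONL record:
    data = {
        "paper_id": "example-0001",
        "sections": ["title", "abstract", "introduction"],
        "sec_text": [["An Example Paper"],
                     ["This paper studies ..."],
                     ["Keyphrase extraction is ..."]],
        "extractive_keyphrases": ["keyphrase extraction"],
        "abstractive_keyphrases": ["long document processing"],
        "sec_bio_tags": [["O"], ["O", "B", "I"], ["O"]],
    }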
ldkp3k.py ADDED
@@ -0,0 +1,119 @@
+ import csv
+ import json
+ import os
+
+ import datasets
+ from typing import List, Any
+
+ # _SPLIT = ['train', 'test', 'valid']
+ _CITATION = """\
+ TBA
+ """
+
+
+ _DESCRIPTION = """\
+ This dataset is designed for the long-document keyphrase (KP) extraction and generation NLP task.
+ """
+
+
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+
+ _URLS = {
+     "test": "data/test.jsonl",
+     "train": "data/train.jsonl",
+     "valid": "data/valid.jsonl"
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class LDKP3k(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="small", version=VERSION, description="Small subset of the long-document data"),
+         datasets.BuilderConfig(name="medium", version=VERSION, description="Medium subset of the long-document data"),
+         datasets.BuilderConfig(name="large", version=VERSION, description="Large subset of the long-document data")
+     ]
+
+     DEFAULT_CONFIG_NAME = "small"
+
+     def _info(self):
+         # The train split lives under a per-config directory, e.g. data/small/train.jsonl
+         _URLS['train'] = "data/" + str(self.config.name) + "/train.jsonl"
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "sections": datasets.features.Sequence(datasets.Value("string")),
+                 "sec_text": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
+                 "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
+                 "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
+                 "sec_bio_tags": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string")))
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir['train'],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir['test'],
+                     "split": "test"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir['valid'],
+                     "split": "valid",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 # Field names assume each JSONL record carries the same
+                 # columns declared in the Features above.
+                 yield key, {
+                     "id": data['paper_id'],
+                     "sections": data["sections"],
+                     "sec_text": data["sec_text"],
+                     "extractive_keyphrases": data["extractive_keyphrases"],
+                     "abstractive_keyphrases": data["abstractive_keyphrases"],
+                     "sec_bio_tags": data["sec_bio_tags"]
+                 }
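
For context, a minimal sketch of how this loading script would be consumed with the `datasets` library once the repository is published on the Hugging Face Hub. The repo id `midas/ldkp3k` is an assumption inferred from the script name; substitute the actual repo id for this commit. The accessed fields follow the Features declared in `_info`:

    from datasets import load_dataset

    # Assumed repo id; "small" is the DEFAULT_CONFIG_NAME declared in the
    # script, and "medium"/"large" select the other builder configs.
    ds = load_dataset("midas/ldkp3k", "small")

    sample = ds["train"][0]
    print(sample["id"])                      # paper id (string)
    print(sample["sections"][:3])            # first few section names
    print(sample["extractive_keyphrases"])   # keyphrases found verbatim in the text
    print(sample["abstractive_keyphrases"])  # keyphrases not present verbatim

Because `_info` builds the train path from `self.config.name`, each config downloads its own train split while sharing the top-level test and valid files.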