Lexi committed on
Commit
548e58c
1 Parent(s): 4a36de9

Upload spanextract.py

Files changed (1)
  1. spanextract.py +110 -0
spanextract.py ADDED
@@ -0,0 +1,110 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Loading script for NQ data in SQuAD format, adapted from the SQuAD (Stanford Question Answering Dataset) script."""
+
+
+import json
+
+import datasets
+from datasets.tasks import QuestionAnsweringExtractive
+
+
+logger = datasets.logging.get_logger(__name__)
+
+
+# _URL = "https://huggingface.co/datasets/Lexi/NQ_squad_format/blob/main/"
+_URLS = {
+    "train": "train.json",
+    "dev": "dev_incomplete.json",
+}
+
+
+class SquadConfig(datasets.BuilderConfig):
+    """BuilderConfig for SQUAD."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for SQUAD.
+
+        Args:
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(SquadConfig, self).__init__(**kwargs)
+
+
+class Squad(datasets.GeneratorBasedBuilder):
+    """SQUAD: The Stanford Question Answering Dataset. Version 1.1."""
+
+    BUILDER_CONFIGS = [
+        SquadConfig(
+            name="plain_text",
+            version=datasets.Version("1.0.0", ""),
+            description="Plain text",
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            # description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("int32"),
+                    "context": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "answers": datasets.features.Sequence(
+                        {
+                            "text": datasets.Value("string"),
+                            "answer_start": datasets.Value("int32"),
+                        }
+                    ),
+                }
+            ),
+            # No default supervised_keys (as we have to pass both question
+            # and context as input).
+            supervised_keys=None,
+            task_templates=[
+                QuestionAnsweringExtractive(
+                    question_column="question", context_column="context", answers_column="answers"
+                )
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("generating examples from = %s", filepath)
+        key = 0
+        # Each file is a JSON list of records with "id", "context", "question" and "answers" fields.
+        with open(filepath, encoding="utf-8") as f:
+            data = json.load(f)
+            logger.info("example data: %s", data[0])
+            logger.info("number of data: %d", len(data))
+            logger.info("data keys: %s", list(data[0].keys()))
+            for line in data:
+                yield key, {
+                    "context": line["context"],
+                    "question": line["question"],
+                    "id": line["id"],
+                    "answers": line["answers"],
+                }
+                key += 1
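
Usage note (not part of the committed file): a minimal sketch of how this loading script could be used with the datasets library, assuming train.json and dev_incomplete.json sit next to spanextract.py (locally or in the Lexi/NQ_squad_format repository). The local path below is illustrative, and recent versions of datasets may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# Point load_dataset at the loading script itself; the relative paths in _URLS
# are resolved next to the script, so the JSON files must sit alongside it.
nq = load_dataset("path/to/spanextract.py")  # hypothetical local path

print(nq)                           # DatasetDict with "train" and "validation" splits
print(nq["train"][0]["question"])   # fields defined in _info(): id, context, question, answers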