Dataset: GEM/squad_v2
Tasks: Other
Modalities: Text
Languages: English
Tags: question-generation
Abinaya Mahendiran committed
Commit 278a858
1 Parent(s): 305310d

Added data preparation script and updated data loader script

Files changed (2):
  1. data_preparation.py +112 -0
  2. squad_v2.py +38 -30
data_preparation.py ADDED
@@ -0,0 +1,112 @@
+""" Script to prepare the SQuAD2.0 data in the GEM format
+
+@author: AbinayaM02
+"""
+
+# Import libraries
+import json
+import pandas as pd
+from sklearn.model_selection import train_test_split
+
+
+# Function to generate gem id
+def add_gem_id(data: dict, split: str) -> dict:
+    """
+    Add a gem id for each datapoint in the dataset.
+
+    Parameters:
+    -----------
+    data: dict,
+        data.
+    split: str,
+        split of data (train, test or validation).
+
+    Returns:
+    --------
+    dict
+        dictionary with updated id
+    """
+    gem_id = -1
+    generated_data = {"data": []}
+    for example in data:
+        title = example["title"]
+        for paragraph in example["paragraphs"]:
+            context = paragraph["context"]  # do not strip leading blank spaces GH-2585
+            for qa in paragraph["qas"]:
+                question = qa["question"]
+                qa_id = qa["id"]
+                answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                answers = [answer["text"] for answer in qa["answers"]]
+                # Features currently used are "context", "question", and "answers".
+                # Others are extracted here for the ease of future expansions.
+                gem_id += 1
+                # Build one record per question-answer pair.
+                temp_dict = {}
+                temp_dict["id"] = qa_id
+                temp_dict["gem_id"] = f"gem-squad_v2-{split}-{gem_id}"
+                temp_dict["title"] = title
+                temp_dict["context"] = context
+                temp_dict["question"] = question
+                temp_dict["answers"] = {
+                    "answer_start": answer_starts,
+                    "text": answers,
+                }
+                generated_data["data"].append(temp_dict)
+    return generated_data
+
+
+# Function to split data
+def split_data(file_name: str, data_type: str) -> (dict, dict):
+    """
+    Method to split the data specific to SQuAD2.0.
+
+    Parameters:
+    -----------
+    file_name: str,
+        name of the file.
+    data_type: str,
+        type of the data file.
+
+    Returns:
+    --------
+    (dict, dict)
+        split of data
+    """
+    if data_type == "json":
+        with open(file_name, "r") as json_file:
+            data = json.load(json_file)["data"]
+
+    # split the data into train and test
+    train, test = train_test_split(data, train_size=0.7, random_state=42)
+    return (train, test)
+
+
+if __name__ == "__main__":
+    # split the train data
+    train, test = split_data("squad_data/train-v2.0.json", "json")
+
+    # add gem id and save the files
+    train = add_gem_id(train, "train")
+    test = add_gem_id(test, "test")
+
+    # save the train split
+    with open("train.json", "w") as train_file:
+        json.dump(train, train_file, indent=2)
+
+    # save the test split
+    with open("test.json", "w") as test_file:
+        json.dump(test, test_file, indent=2)
+
+    # load validation data
+    with open("squad_data/dev-v2.0.json", "r") as dev_file:
+        validation = json.load(dev_file)["data"]
+
+    # add gem id and save valid.json
+    validation = add_gem_id(validation, "validation")
+    with open("valid.json", "w") as val_file:
+        json.dump(validation, val_file, indent=2)
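Note: the loader changes below read each prepared file line by line with json.loads(row), i.e. they expect one JSON record per line. A minimal sketch of writing records in that layout; the helper name write_jsonl and the example call are illustrative and not part of this commit:

import json
from typing import Iterable


def write_jsonl(records: Iterable[dict], path: str) -> None:
    # Sketch only: write one JSON object per line, the layout a
    # line-by-line json.loads(row) reader (as in _generate_examples below) expects.
    with open(path, "w", encoding="utf-8") as out_file:
        for record in records:
            out_file.write(json.dumps(record) + "\n")


# Example usage with the splits prepared above (illustrative):
# write_jsonl(add_gem_id(train, "train")["data"], "train.json")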
squad_v2.py CHANGED
@@ -23,15 +23,15 @@ archivePrefix = {arXiv},
 """

 _DESCRIPTION = """\
-combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers
+SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers
 to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but
 also determine when no answer is supported by the paragraph and abstain from answering.
 """

-_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
 _URLS = {
-    "train": _URL + "train-v2.0.json",
-    "dev": _URL + "dev-v2.0.json",
+    "train": "train.json",
+    "test": "test.json",
+    "valid": "validation.json",
 }


@@ -64,6 +64,7 @@ class SquadV2(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "gem_id": datasets.Value("string"),
+                    "id": datasets.Value("string"),
                     "title": datasets.Value("string"),
                     "context": datasets.Value("string"),
                     "question": datasets.Value("string"),
@@ -99,35 +100,42 @@ class SquadV2(datasets.GeneratorBasedBuilder):
         downloaded_files = dl_manager.download_and_extract(urls_to_download)

         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "split": "train"}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": downloaded_files["train"],
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": downloaded_files["validation"],
+                    "split": "validation",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": downloaded_files["test"],
+                    "split": "test",
+                },
+            ),
         ]

     def _generate_examples(self, filepath, split):
         """Yields examples."""
         # TODO(squad_v2): Yields (key, example) tuples from the dataset
         with open(filepath, encoding="utf-8") as f:
-            squad = json.load(f)
-            for example in squad["data"]:
-                title = example.get("title", "")
-                for paragraph in example["paragraphs"]:
-                    context = paragraph["context"]  # do not strip leading blank spaces GH-2585
-                    for qa in paragraph["qas"]:
-                        question = qa["question"]
-                        id_ = qa["id"]
-
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"] for answer in qa["answers"]]
-
-                        # Features currently used are "context", "question", and "answers".
-                        # Others are extracted here for the ease of future expansions.
-                        yield id_, {
-                            "title": title,
-                            "context": context,
-                            "question": question,
-                            "gem_id": f"gem-{squad_v2}-{split}-{id_}",
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
+            for id_, row in enumerate(f):
+                data = json.loads(row)
+                # Features currently used are "context", "question", and "answers".
+                # Others are extracted here for the ease of future expansions.
+                yield id_, {
+                    "id": data["id"],
+                    "gem_id": data["gem_id"],
+                    "title": data["title"],
+                    "context": data["context"],
+                    "question": data["question"],
+                    "answers": data["answers"],
+                }
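With the loader updated and the prepared split files available alongside it, the dataset can be consumed through the datasets library. A short usage sketch, assuming the script is hosted on the Hub as GEM/squad_v2:

from datasets import load_dataset

# Load all three splits via the updated loader script.
dataset = load_dataset("GEM/squad_v2")
print(dataset)  # DatasetDict with train, validation, and test splits

example = dataset["train"][0]
print(example["gem_id"], example["question"])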