vosadcii committed on
Commit
d9e8645
1 Parent(s): 62fe9d7
Files changed (8) hide show
  1. .gitattributes +2 -0
  2. .gitignore +2 -0
  3. README.md +26 -0
  4. data/test.json +3 -0
  5. data/train.json +3 -0
  6. data_preparation.py +43 -0
  7. policies.py +103 -0
  8. train.json +0 -0
.gitattributes CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
55
+ data/train.json filter=lfs diff=lfs merge=lfs -text
56
+ data/test.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ cache_policies
2
+ .DS_Store
README.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ dataset_info:
3
+ features:
4
+ - name: id
5
+ dtype: string
6
+ - name: context
7
+ dtype: string
8
+ - name: question
9
+ dtype: string
10
+ - name: answers
11
+ sequence:
12
+ - name: text
13
+ dtype: string
14
+ - name: answer_start
15
+ dtype: int32
16
+ config_name: plain_text
17
+ splits:
18
+ - name: train
19
+ num_bytes: 3245009
20
+ num_examples: 7632
21
+ - name: test
22
+ num_bytes: 359230
23
+ num_examples: 849
24
+ download_size: 5007313
25
+ dataset_size: 3604239
26
+ ---
data/test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b7d46b01cac93093bef2bfdb82be726abe0a636842823f5a3fa8dab50380f40
3
+ size 499720
data/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:001aa8ee8b183dc4b333dcdd8a8113e36aa45a0c53b4b061d5361f03269b3362
3
+ size 4507593
data_preparation.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from sklearn.model_selection import train_test_split
3
+
4
+
5
def split_data(file_name: str, data_type: str):
    """
    Load a dataset file and split it into train/test partitions.

    Parameters:
    ----------
    file_name: str,
        path to the dataset file to load.
    data_type: str,
        format of the file; only "json" is supported.

    Returns:
    --------
    tuple
        (train, test) lists — a 90%/10% split with a fixed random
        seed so the split is reproducible.

    Raises:
    ------
    ValueError
        If `data_type` is not a supported format.
    """
    if data_type != "json":
        # Previously an unsupported type fell through and raised a
        # confusing NameError on the undefined `data`; fail fast instead.
        raise ValueError(f"unsupported data_type: {data_type!r}")

    # The `with` statement closes the file on exit; no explicit close() needed.
    with open(file_name, 'r') as json_file:
        data = json.load(json_file)["data"]

    train, test = train_test_split(data, train_size=0.9, random_state=42)
    return train, test
13
+
14
+
15
def save_json(data: dict, file_name: str):
    """
    Method to save the json file.
    Parameters:
    ----------
    data: dict,
        data to be saved in file.
    file_name: str,
        name of the file.
    Returns:
    --------
    None
    """
    # The `with` statement already closes the file on exit; the explicit
    # close() the original carried was redundant and has been dropped.
    with open(file_name, "w") as data_file:
        json.dump(data, data_file, indent=2)
33
+
34
+
35
if __name__ == "__main__":
    # Partition the raw file into train/test, then persist each
    # split under the data/ directory.
    train_split, test_split = split_data("train.json", "json")

    for split, path in (
        (train_split, "data/train.json"),
        (test_split, "data/test.json"),
    ):
        save_json({"data": split}, path)
policies.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import datasets
4
+ from datasets.tasks import QuestionAnsweringExtractive
5
+
6
+
7
# Citation for the dataset card; intentionally empty for now.
_CITATION = """"""

# Short description shown on the dataset hub page.
_DESCRIPTION = """\
Manually generated dataset for policies qa
"""

# Relative paths to the LFS-tracked split files shipped with this repo;
# resolved locally by dl_manager.download_and_extract in _split_generators.
_URLS = {
    "train": "./data/train.json",
    "test": "./data/test.json"
}
17
+
18
+
19
class PoliciesQAConfig(datasets.BuilderConfig):
    """Configuration for the Ineract Policies QA dataset builder."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
28
+
29
+
30
class PoliciesQA(datasets.GeneratorBasedBuilder):
    """Ineract Policies: The Policy Question Answering Dataset. Version 0.1"""

    BUILDER_CONFIGS = [
        PoliciesQAConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    DEFAULT_CONFIG_NAME = "plain_text"

    def _info(self):
        """Return dataset metadata: features, homepage and task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="ineract.com",
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare the train/test splits from the local _URLS paths."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
                "filepath": downloaded_files["train"], "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
                "filepath": downloaded_files["test"], "split": "test"})
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs in the raw (text) form.

        Assumes each record under ``data`` carries ``id``, ``context``,
        ``question`` and an ``answers`` dict with parallel ``text`` /
        ``answer_start`` lists (SQuAD-like layout), matching what
        data_preparation.py wrote.
        """
        with open(filepath, encoding="utf-8") as f:
            policies = json.load(f)
        # enumerate replaces the hand-rolled `key` counter; list() replaces
        # the no-op copy comprehensions; the local `id` that shadowed the
        # builtin is gone.
        for key, policy in enumerate(policies["data"]):
            yield key, {
                "id": policy["id"],
                "context": policy["context"],
                "question": policy["question"],
                "answers": {
                    "answer_start": list(policy["answers"]["answer_start"]),
                    "text": list(policy["answers"]["text"]),
                },
            }
train.json ADDED
The diff for this file is too large to render. See raw diff