parquet-converter committed on
Commit 3375597
1 Parent(s): 8dcda33

Update parquet files

.gitignore DELETED
@@ -1,2 +0,0 @@
- cache_policies
- .DS_Store
 
 
 
README.md DELETED
@@ -1,26 +0,0 @@
- ---
- dataset_info:
-   features:
-   - name: id
-     dtype: string
-   - name: context
-     dtype: string
-   - name: question
-     dtype: string
-   - name: answers
-     sequence:
-     - name: text
-       dtype: string
-     - name: answer_start
-       dtype: int32
-   config_name: plain_text
-   splits:
-   - name: train
-     num_bytes: 1846196
-     num_examples: 4500
-   - name: test
-     num_bytes: 201754
-     num_examples: 501
-   download_size: 2875518
-   dataset_size: 2047950
- ---
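The deleted card above is what the `datasets` tooling uses to report features and split sizes without downloading anything, and the same numbers can be checked after loading. A minimal sketch, assuming a placeholder repo id (`ineract/policies-qa` is hypothetical, not taken from this page):

```python
# Minimal sketch: load the dataset and compare it against the deleted card.
# "ineract/policies-qa" is a placeholder repo id, not the real one.
from datasets import load_dataset

ds = load_dataset("ineract/policies-qa", "plain_text")

print(ds["train"].num_rows)  # 4500 per the card
print(ds["test"].num_rows)   # 501 per the card
print(ds["train"].features)  # id/context/question strings + answers sequence
```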
data_preparation.py DELETED
@@ -1,43 +0,0 @@
- import json
- from sklearn.model_selection import train_test_split
-
-
- def split_data(file_name: str, data_type: str):
-     if data_type == "json":
-         with open(file_name, 'r') as json_file:
-             data = json.load(json_file)["data"]
-         json_file.close()
-
-         train, test = train_test_split(data, train_size=0.9, random_state=42)
-         return (train, test)
-
-
- def save_json(data: dict, file_name: str):
-     """
-     Method to save the json file.
-     Parameters:
-     ----------
-     data: dict,
-         data to be saved in file.
-     file_name: str,
-         name of the file.
-     Returns:
-     --------
-     None
-     """
-
-     # save the split
-     with open(file_name, "w") as data_file:
-         json.dump(data, data_file, indent=2)
-     data_file.close()
-
-
- if __name__ == "__main__":
-     # split the train data
-     train, test = split_data("train.json", "json")
-
-     # save the train split
-     save_json({"data": train}, "data/train.json")
-
-     # save the test split
-     save_json({"data": test}, "data/test.json")
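One detail worth noting about the deleted script above: a 90/10 split of the full 5001 examples reproduces exactly the 4500/501 counts recorded in the dataset card, because scikit-learn floors the train fraction. A small sketch of that arithmetic (the total of 5001 is inferred from the card, not stated in the script):

```python
# Sketch: the 90/10 split in data_preparation.py yields the card's counts.
# The total of 5001 examples is inferred from the card (4500 + 501).
from sklearn.model_selection import train_test_split

dummy = list(range(4500 + 501))
train, test = train_test_split(dummy, train_size=0.9, random_state=42)
assert (len(train), len(test)) == (4500, 501)
```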
data/test.json → plain_text/policies-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6fd3c19c7a55f1ca28f20dec5eb3c9435df4e6bc96182b9fbaca9c8608446dfa
- size 284488
+ oid sha256:0ba36886ba51f82d8865ec4316114e59bfb05f27965140d44616450869c3222e
+ size 132260
data/train.json → plain_text/policies-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f1c86f7ad020320f143cd944214b72a59e8b88dacb73525e1c5c974fbdf89094
- size 2591030
+ oid sha256:13af37b7cf33e17f7f179628dd705d3e88c81bb17ff0f3f51ffb8c459c5b6027
+ size 1177092
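The renames above replace the raw JSON splits with auto-converted Parquet shards under `plain_text/`. Since these are plain Parquet files, they can be read directly without any loading script; a minimal sketch, assuming the shards have been pulled locally via git-lfs:

```python
# Sketch: read a converted shard directly with pyarrow.
# Paths come from the renames above; files must be fetched via git-lfs first.
import pyarrow.parquet as pq

table = pq.read_table("plain_text/policies-test.parquet")
print(table.num_rows)  # 501 per the dataset card
print(table.schema)    # id/context/question plus the nested answers columns
```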
policies.py DELETED
@@ -1,103 +0,0 @@
- import json
-
- import datasets
- from datasets.tasks import QuestionAnsweringExtractive
-
-
- _CITATION = """"""
-
- _DESCRIPTION = """\
- Manually generated dataset for policies qa
- """
-
- _URLS = {
-     "train": "./data/train.json",
-     "test": "./data/test.json"
- }
-
-
- class PoliciesQAConfig(datasets.BuilderConfig):
-     """BuilderConfig for Ineract Policies."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for Ineract Policies.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(PoliciesQAConfig, self).__init__(**kwargs)
-
-
- class PoliciesQA(datasets.GeneratorBasedBuilder):
-     """Ineract Policies: The Policy Question Answering Dataset. Version 0.1"""
-
-     BUILDER_CONFIGS = [
-         PoliciesQAConfig(
-             name="plain_text",
-             version=datasets.Version("1.0.0", ""),
-             description="Plain text",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "plain_text"
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {
-                             "text": datasets.Value("string"),
-                             "answer_start": datasets.Value("int32"),
-                         }
-                     ),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both question
-             # and context as input).
-             supervised_keys=None,
-             homepage="ineract.com",
-             task_templates=[
-                 QuestionAnsweringExtractive(
-                     question_column="question", context_column="context", answers_column="answers"
-                 )
-             ],
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
-                 "filepath": downloaded_files["train"], "split": "train"}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
-                 "filepath": downloaded_files["test"], "split": "test"})
-         ]
-
-     def _generate_examples(self, filepath, split):
-         """This function returns the examples in the raw (text) form."""
-         key = 0
-         with open(filepath, encoding="utf-8") as f:
-             policies = json.load(f)
-             for policy in policies["data"]:
-                 id = policy["id"]
-                 context = policy["context"]
-                 question = policy["question"]
-                 answer_starts = [answer_start
-                                  for answer_start in policy["answers"]["answer_start"]]
-                 answers = [
-                     answer_text for answer_text in policy["answers"]["text"]]
-                 yield key, {
-                     "id": id,
-                     "context": context,
-                     "question": question,
-                     "answers": {
-                         "answer_start": answer_starts,
-                         "text": answers,
-                     },
-                 }
-                 key += 1
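For reference, `_generate_examples` above expects each entry of `policies["data"]` to be a SQuAD-style record with parallel `text`/`answer_start` lists, which is the shape the now-deleted `train.json` carried. A toy record (values invented for illustration; only the field layout is taken from the script):

```python
# Hypothetical record in the shape _generate_examples consumes; the values
# are invented, only the field layout comes from the deleted script above.
record = {
    "id": "policy-0",
    "context": "Employees accrue 20 days of paid leave per year.",
    "question": "How many days of paid leave do employees accrue?",
    "answers": {"text": ["20 days"], "answer_start": [17]},
}
assert record["context"][17:24] == "20 days"
```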
train.json DELETED
The diff for this file is too large to render. See raw diff