vosadcii committed
Commit a1d99b9
Parent: 9905cd0

regenerated readme

.gitignore ADDED
@@ -0,0 +1 @@
+cache_policies
README.md CHANGED
@@ -29,9 +29,12 @@ dataset_info:
   config_name: plain_text
   splits:
   - name: train
-    num_bytes: 1305279
-    num_examples: 13480
-  download_size: 0
+    num_bytes: 1174320
+    num_examples: 12132
+  - name: test
+    num_bytes: 130959
+    num_examples: 1348
+  download_size: 3325946
   dataset_size: 1305279
   ---

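Sanity check on the regenerated metadata: 1,174,320 + 130,959 = 1,305,279 bytes, so dataset_size is unchanged, and 12,132 + 1,348 = 13,480 examples, matching the old train-only split.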
create_policies_data.py ADDED
@@ -0,0 +1,28 @@
+from datasets import load_dataset, DatasetDict
+dataset_config = {
+    "LOADING_SCRIPT_FILES": "policies.py",
+    "CONFIG_NAME": "plain_text",
+    "DATA_DIR": "data",
+    "CACHE_DIR": "cache_policies",
+}
+
+ds = load_dataset(
+    dataset_config["LOADING_SCRIPT_FILES"],
+    dataset_config["CONFIG_NAME"],
+    data_dir=dataset_config["DATA_DIR"],
+    cache_dir=dataset_config["CACHE_DIR"]
+)
+
+# 90% train, 10% test + validation
+# Split the 10% test + validation pool in half: test and validation
+train_testvalid = ds["train"].train_test_split(shuffle=True, test_size=0.1)
+test_valid = train_testvalid["test"].train_test_split(
+    test_size=0.5)
+# Gather everything into a single DatasetDict
+ds = DatasetDict({
+    "train": train_testvalid["train"],
+    "test": test_valid["test"],
+    "val": test_valid["train"],
+})
+
+print(ds)
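For reference, print(ds) renders the standard DatasetDict summary. Assuming the 13,480-example source recorded in the old README metadata, the 90/5/5 split above would come out roughly as:

# Approximate output of print(ds); row counts assume 13,480 source examples
# (12,132 train, then the 1,348-example remainder halved into test and val):
# DatasetDict({
#     train: Dataset({features: ['context', 'question', 'answers'], num_rows: 12132}),
#     test: Dataset({features: ['context', 'question', 'answers'], num_rows: 674}),
#     val: Dataset({features: ['context', 'question', 'answers'], num_rows: 674})
# })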
data/test.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/train.json CHANGED
The diff for this file is too large to render. See raw diff
 
data_preparation.py CHANGED
@@ -37,7 +37,7 @@ if __name__ == "__main__":
37
  train, test = split_data("policies-qa-training.json", "json")
38
 
39
  # save the train split
40
- save_json(train, "train.json")
41
 
42
  # save the test split
43
- save_json(test, "test.json")
 
37
  train, test = split_data("policies-qa-training.json", "json")
38
 
39
  # save the train split
40
+ save_json({"data": train}, "train.json")
41
 
42
  # save the test split
43
+ save_json({"data": test}, "test.json")
dataset_info.json DELETED
@@ -1 +0,0 @@
-{"description": "Manually generated dataset for policies qa\n", "citation": "", "homepage": "ineract.com", "license": "", "features": {"context": {"dtype": "string", "_type": "Value"}, "question": {"dtype": "string", "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "_type": "Value"}, "answer_start": {"dtype": "int32", "_type": "Value"}}, "_type": "Sequence"}}, "task_templates": [{"task": "question-answering-extractive"}], "builder_name": "policies-qa", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1305279, "num_examples": 13480, "dataset_name": "policies-qa"}}, "download_checksums": {}, "download_size": 0, "dataset_size": 1305279, "size_in_bytes": 1305279}
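The config name and split sizes removed here now live in the README's dataset_info YAML block (diffed above), which presumably makes the standalone dataset_info.json redundant.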
policies.py CHANGED
@@ -1,4 +1,5 @@
 import json
+import os
 
 import datasets
 from datasets.tasks import QuestionAnsweringExtractive
@@ -13,9 +14,9 @@ Manually generated dataset for policies qa
 """
 
 _URLS = {
-    "train": "./data/train.json",
-    "test": "./data/test.json"
-}
+    "train": "./data/train.json",
+    "test": "./data/test.json"
+}
 
 
 class PoliciesQAConfig(datasets.BuilderConfig):
@@ -74,10 +75,13 @@ class PoliciesQA(datasets.GeneratorBasedBuilder):
 
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
-                "filepath": downloaded_files["train"]})
+                "filepath": downloaded_files["train"], "split": "train"}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
+                "filepath": downloaded_files["test"], "split": "test"})
         ]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, split):
+        print(os.fspath(filepath))
         """This function returns the examples in the raw (text) form."""
         logger.info("generating examples from = %s", filepath)
         key = 0
@@ -90,8 +94,6 @@ class PoliciesQA(datasets.GeneratorBasedBuilder):
                 for answer_start in policy["answers"]["answer_start"]]
             answers = [
                 answer_text for answer_text in policy["answers"]["text"]]
-            # Features currently used are "context", "question", and "answers".
-            # Others are extracted here for the ease of future expansions.
             yield key, {
                 "context": context,
                 "question": question,