parquet-converter committed on
Commit
01122d3
1 Parent(s): a635872

Update parquet files

Browse files
deepage2_qa_dataset.py DELETED
@@ -1,69 +0,0 @@
1
- """TODO: DATASET SHORT DESCRIPTION"""
2
-
3
- import json
4
- import datasets
5
- from datasets.tasks import TextClassification
6
-
7
- _DESCRIPTION = """TODO: DATASET DESCRIPTION"""
8
-
9
- _CITATION = """TODO: CITATIONS"""
10
-
11
- _URL = "https://huggingface.co/datasets/rsgrava/deepage2_qa_dataset/resolve/main/"
12
- _URLS = {
13
- "train": _URL + "expanded_ds-train.json",
14
- "dev": _URL + "expanded_ds-test.json",
15
- }
16
-
17
class SquadV1PtBr(datasets.GeneratorBasedBuilder):
    """Question-generation dataset built from SQuAD-v1-style JSON (PT-BR).

    Each yielded example pairs a paragraph's context (``source_text``) with
    the paragraph's de-duplicated questions joined by ``"<sep>"``
    (``target_text``).
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata (features, description, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "source_text": datasets.Value("string"),
                    "target_text": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question and context as input).
            supervised_keys=None,
            homepage="TODO: HOMEPAGE",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the train/dev JSON files and declare the two splits.

        Args:
            dl_manager: the ``datasets`` download manager used to fetch
                the files listed in the module-level ``_URLS`` mapping.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one SQuAD-style JSON file.

        One example is emitted per paragraph: the stripped context is the
        source text, and the stripped questions of that paragraph (skipping
        any ``qa["id"]`` already seen anywhere in the file) joined with
        ``"<sep>"`` are the target text.

        Args:
            filepath: path to the downloaded JSON file for this split.
        """
        # Explicit UTF-8: without it, open() decodes with the platform
        # locale, which breaks on non-ASCII (PT-BR) text on some systems.
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)

        key = 0
        # A set gives O(1) membership tests; the original used a list,
        # making the duplicate check O(n) per question (O(n^2) overall).
        seen_ids = set()
        for article in squad["data"]:
            for paragraph in article["paragraphs"]:
                source_text = paragraph["context"].strip()
                questions = []
                for qa in paragraph["qas"]:
                    qa_id = qa["id"]
                    if qa_id in seen_ids:
                        # skip duplicate entries
                        continue
                    seen_ids.add(qa_id)
                    questions.append(qa["question"].strip())

                yield key, {
                    "source_text": source_text,
                    "target_text": "<sep>".join(questions),
                }
                key += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
default/deepage2_qa_dataset-train.parquet ADDED
Binary file (593 kB). View file
 
default/deepage2_qa_dataset-validation.parquet ADDED
Binary file (128 kB). View file
 
expanded_ds-dev.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:cff81e597d24726e61167b81aef2f608c817138bc44cf20d97b9d29ecd0aa764
3
- size 276756
 
 
 
 
expanded_ds-train.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:176ee5873fa73d35465c8b186b3e96320d9b34d1575915553f4397d30a09fe87
3
- size 1296533
 
 
 
 
expanded_ds-validation.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c81a2baf95dd66bf22a7853636511ec05b4e8e570d18290ceaf01a55d4c6dae1
3
- size 276270