Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
License:
albertvillanova (HF staff) committed
Commit: 3f10467
Parent: 4cdcefa

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (2abf38b6b1f827c5051b29d223977203e3c8c821)
- Delete loading script (27cb3a4ece82a68a3f40eb27a1477a3a3cbb189d)
- Delete legacy dataset_infos.json (b7c3ae318d74452e2c8f4532f33848698e10a911)
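
Below is a minimal sketch of what the conversion means for consumers: with the Parquet shards and the split metadata in place, `load_dataset` reads the files under `data/` directly and no longer executes `wiki_qa.py` on the client side. The repository id `wiki_qa` is taken from the dataset card; exact behaviour depends on the installed `datasets` version.

```python
from datasets import load_dataset

# After this commit the Hub serves the Parquet shards directly,
# so no loading script runs on the client side.
ds = load_dataset("wiki_qa")  # repo id assumed from the dataset card

print(ds)              # DatasetDict with train / validation / test splits
print(ds["train"][0])  # keys: question_id, question, document_title, answer, label
```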

README.md CHANGED
@@ -37,16 +37,25 @@ dataset_info:
           '1': '1'
   splits:
   - name: test
-    num_bytes: 1337903
+    num_bytes: 1333261
     num_examples: 6165
-  - name: train
-    num_bytes: 4469148
-    num_examples: 20360
   - name: validation
-    num_bytes: 591833
+    num_bytes: 589765
     num_examples: 2733
-  download_size: 7094233
-  dataset_size: 6398884
+  - name: train
+    num_bytes: 4453862
+    num_examples: 20360
+  download_size: 2861208
+  dataset_size: 6376888
+configs:
+- config_name: default
+  data_files:
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
+  - split: train
+    path: data/train-*
 ---

 # Dataset Card for "wiki_qa"
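
As a rough illustration of what the new `configs` block encodes, the same split-to-shard mapping can be passed explicitly to the generic `parquet` builder. This is only a sketch: the glob paths are assumed to be relative to a local clone of this repository with the LFS files present.

```python
from datasets import load_dataset

# Hypothetical explicit equivalent of the configs/data_files mapping above,
# assuming the repository has been cloned locally with Git LFS.
data_files = {
    "test": "data/test-*",
    "validation": "data/validation-*",
    "train": "data/train-*",
}
ds = load_dataset("parquet", data_files=data_files)

# Expected to match the split metadata: test=6165, validation=2733, train=20360
print({split: d.num_rows for split, d in ds.items()})
```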
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb309da359eda13c4a68fcd0dbff5cc7fc4b73d8fda2585d162606dc035b9c48
+size 593691
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55e3c86de5f7922f68320e8a7f527a854e6add14f9e5f72b9cca97efcdc915ea
+size 2004001
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b774df06cb641feab1e7e3d01aa80eada5443b6a6c2807233d7837c2c50bc4e
+size 263516
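
The three files above are Git LFS pointers; the actual Parquet shards live in LFS storage. As a sanity-check sketch (assuming a local clone where `git lfs pull` has replaced the pointers with the real files), one shard can be read directly:

```python
import pandas as pd

# Assumes data/test-00000-of-00001.parquet is the real Parquet file,
# not the LFS pointer shown in the diff above.
df = pd.read_parquet("data/test-00000-of-00001.parquet")

print(len(df))               # expected: 6165, the test split's num_examples
print(df.columns.tolist())   # question_id, question, document_title, answer, label
```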
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "Wiki Question Answering corpus from Microsoft\n", "citation": "@InProceedings{YangYihMeek:EMNLP2015:WikiQA,\n author = {{Yi}, Yang and {Wen-tau}, Yih and {Christopher} Meek},\n title = \"{WikiQA: A Challenge Dataset for Open-Domain Question Answering}\",\n journal = {Association for Computational Linguistics},\n year = 2015,\n doi = {10.18653/v1/D15-1237},\n pages = {2013\u20132018},\n}\n", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52419", "license": "", "features": {"question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "document_title": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "wiki_qa", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1337903, "num_examples": 6165, "dataset_name": "wiki_qa"}, "train": {"name": "train", "num_bytes": 4469148, "num_examples": 20360, "dataset_name": "wiki_qa"}, "validation": {"name": "validation", "num_bytes": 591833, "num_examples": 2733, "dataset_name": "wiki_qa"}}, "download_checksums": {"https://download.microsoft.com/download/E/5/f/E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip": {"num_bytes": 7094233, "checksum": "467c13f9e104552c0a9c16f41836ca8d89f9c0cc4b6e4355e104d5c3109ffa45"}}, "download_size": 7094233, "dataset_size": 6398884, "size_in_bytes": 13493117}}
 
wiki_qa.py DELETED
@@ -1,96 +0,0 @@
-"""TODO(wiki_qa): Add a description here."""
-
-
-import csv
-import os
-
-import datasets
-
-
-# TODO(wiki_qa): BibTeX citation
-_CITATION = """\
-@InProceedings{YangYihMeek:EMNLP2015:WikiQA,
-  author = {{Yi}, Yang and {Wen-tau}, Yih and {Christopher} Meek},
-  title = "{WikiQA: A Challenge Dataset for Open-Domain Question Answering}",
-  journal = {Association for Computational Linguistics},
-  year = 2015,
-  doi = {10.18653/v1/D15-1237},
-  pages = {2013–2018},
-}
-"""
-
-# TODO(wiki_qa):
-_DESCRIPTION = """\
-Wiki Question Answering corpus from Microsoft
-"""
-
-_DATA_URL = "https://download.microsoft.com/download/E/5/f/E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"  # 'https://www.microsoft.com/en-us/download/confirmation.aspx?id=52419'
-
-
-class WikiQa(datasets.GeneratorBasedBuilder):
-    """TODO(wiki_qa): Short description of my dataset."""
-
-    # TODO(wiki_qa): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(wiki_qa): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "question_id": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "document_title": datasets.Value("string"),
-                    "answer": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(num_classes=2),
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://www.microsoft.com/en-us/download/details.aspx?id=52419",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(wiki_qa): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        dl_dir = os.path.join(dl_dir, "WikiQACorpus")
-        # dl_dir = os.path.join(dl_dir, '')
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-test.tsv")}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-dev.tsv")}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-train.tsv")},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(wiki_qa): Yields (key, example) tuples from the dataset
-
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for idx, row in enumerate(reader):
-                yield idx, {
-                    "question_id": row["QuestionID"],
-                    "question": row["Question"],
-                    "document_title": row["DocumentTitle"],
-                    "answer": row["Sentence"],
-                    "label": row["Label"],
-                }
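
For anyone who still needs to rebuild the data from the original Microsoft release rather than the Parquet shards, a rough pandas equivalent of the deleted `_generate_examples` is sketched below. It assumes `WikiQACorpus.zip` from the `_DATA_URL` above has been downloaded and extracted into the working directory.

```python
import csv
import pandas as pd

# Approximate replacement for the deleted loader: parse one original TSV
# and rename columns to the feature names used by the dataset card.
df = pd.read_csv(
    "WikiQACorpus/WikiQA-train.tsv",
    sep="\t",
    quoting=csv.QUOTE_NONE,  # same quoting behaviour as the deleted script
    encoding="utf-8",
)
df = df.rename(
    columns={
        "QuestionID": "question_id",
        "Question": "question",
        "DocumentTitle": "document_title",
        "Sentence": "answer",
        "Label": "label",
    }
)[["question_id", "question", "document_title", "answer", "label"]]

print(len(df))  # expected: 20360, the train split's num_examples
```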