Dataset: wiki_qa

Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
License:
system (HF staff) committed
Commit df3247d
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4):
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/0.1.0/dummy_data.zip +3 -0
  4. wiki_qa.py +97 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
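
Each pattern above routes a class of large or binary artifacts (archives, model weights, Arrow/Parquet data, TensorBoard event files) through Git LFS, so the repository stores small pointer files instead of the blobs themselves. As a rough illustration of what these rules catch, here is a sketch using Python's fnmatch; it is only an approximation, since git's own wildmatch engine treats "/" and "**" differently.

# Rough approximation of the .gitattributes rules above using fnmatch.
# Caveat: git's wildmatch is not identical to fnmatch ("*" here also
# crosses "/" boundaries), so treat this as illustrative only.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "*.7z", "*.arrow", "*.bin", "*.zip", "*.parquet", "*tfevents*",
]

def routed_through_lfs(path):
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

for candidate in ["dummy/0.1.0/dummy_data.zip", "wiki_qa.py"]:
    print(candidate, "->", routed_through_lfs(candidate))  # True, False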
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "Wiki Question Answering corpus from Microsoft\n", "citation": "@InProceedings{YangYihMeek:EMNLP2015:WikiQA,\n author = {{Yi}, Yang and {Wen-tau}, Yih and {Christopher} Meek},\n title = \"{WikiQA: A Challenge Dataset for Open-Domain Question Answering}\",\n journal = {Association for Computational Linguistics},\n year = 2015,\n doi = {10.18653/v1/D15-1237},\n pages = {2013\u20132018},\n}\n", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52419", "license": "", "features": {"question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "document_title": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "wiki_qa", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1337903, "num_examples": 6165, "dataset_name": "wiki_qa"}, "train": {"name": "train", "num_bytes": 4469148, "num_examples": 20360, "dataset_name": "wiki_qa"}, "validation": {"name": "validation", "num_bytes": 591833, "num_examples": 2733, "dataset_name": "wiki_qa"}}, "download_checksums": {"https://download.microsoft.com/download/E/5/f/E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip": {"num_bytes": 7094233, "checksum": "467c13f9e104552c0a9c16f41836ca8d89f9c0cc4b6e4355e104d5c3109ffa45"}}, "download_size": 7094233, "dataset_size": 6398884, "size_in_bytes": 13493117}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d7238a3a8d7e6f18ef01eacdb01fdcd3ba855fbf9c95b9e30040c488301a741
+ size 1766
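
Note that the three lines above are not the zip archive itself but a Git LFS pointer: the spec version, the SHA-256 of the real content, and its size in bytes. A toy parser, included here only to illustrate the key-value pointer format:

# Toy parser for the Git LFS pointer shown above: each line is
# "key value", and the oid value carries an algorithm prefix.
def parse_lfs_pointer(text):
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:8d7238a3a8d7e6f18ef01eacdb01fdcd3ba855fbf9c95b9e30040c488301a741
size 1766"""
print(parse_lfs_pointer(pointer))  # algo 'sha256', size 1766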
wiki_qa.py ADDED
@@ -0,0 +1,97 @@
+ """TODO(wiki_qa): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import os
+
+ import datasets
+
+
+ # TODO(wiki_qa): BibTeX citation
+ _CITATION = """\
+ @InProceedings{YangYihMeek:EMNLP2015:WikiQA,
+ author = {Yang, Yi and Yih, Wen-tau and Meek, Christopher},
+ title = "{WikiQA: A Challenge Dataset for Open-Domain Question Answering}",
+ booktitle = {Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing},
+ year = 2015,
+ doi = {10.18653/v1/D15-1237},
+ pages = {2013–2018},
+ }
+ """
+
+ # TODO(wiki_qa):
+ _DESCRIPTION = """\
+ Wiki Question Answering corpus from Microsoft
+ """
+
+ _DATA_URL = "https://download.microsoft.com/download/E/5/f/E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"  # 'https://www.microsoft.com/en-us/download/confirmation.aspx?id=52419'
+
+
+ class WikiQa(datasets.GeneratorBasedBuilder):
+     """TODO(wiki_qa): Short description of my dataset."""
+
+     # TODO(wiki_qa): Set up version.
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         # TODO(wiki_qa): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "question_id": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "document_title": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                     "label": datasets.features.ClassLabel(num_classes=2),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://www.microsoft.com/en-us/download/details.aspx?id=52419",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(wiki_qa): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         dl_dir = dl_manager.download_and_extract(_DATA_URL)
+         dl_dir = os.path.join(dl_dir, "WikiQACorpus")
+         # dl_dir = os.path.join(dl_dir, '')
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-test.tsv")}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-dev.tsv")}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-train.tsv")},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(wiki_qa): Yields (key, example) tuples from the dataset
+
+         with open(filepath, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+             for idx, row in enumerate(reader):
+                 yield idx, {
+                     "question_id": row["QuestionID"],
+                     "question": row["Question"],
+                     "document_title": row["DocumentTitle"],
+                     "answer": row["Sentence"],
+                     "label": row["Label"],
+                 }
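
With the loading script in place, the dataset can be consumed like any other Hub dataset. A usage sketch, assuming the datasets library is installed and the Microsoft download URL is reachable (the first call downloads and extracts WikiQACorpus.zip, then _generate_examples streams the TSV rows):

# Usage sketch: each row pairs a question with one candidate answer
# sentence and a 0/1 relevance label, as defined in _info above.
from datasets import load_dataset

wiki_qa = load_dataset("wiki_qa")
print(wiki_qa)                                # DatasetDict with train/validation/test splits
example = wiki_qa["train"][0]
print(example["question"], "->", example["answer"], example["label"])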